Import of https://github.com/ciena/voltctl at commit 40d61fbf3f910ed4017cf67c9c79e8e1f82a33a5

Change-Id: I8464c59e60d76cb8612891db3303878975b5416c
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not being compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line.  The
+// "disableunsafe" tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by clearing the read-only bits and setting the
+// addressable bit in the flag field of the protected value, yielding an
+// unprotected (unsafe) reflect.Value.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}
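
A rough sketch (a hypothetical consumer of the vendored package, not part of
this import) of what the unsafe bypass above enables: a fmt.Stringer reached
only through an unexported field is still invoked on the default build, while
a build with "-tags safe" falls back to dumping the raw struct fields.

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	// color implements fmt.Stringer.
	type color struct{ r, g, b uint8 }

	func (c color) String() string { return fmt.Sprintf("#%02x%02x%02x", c.r, c.g, c.b) }

	// palette exposes color only through an unexported field, so calling its
	// String method requires the flag manipulation done in bypass.go.
	type palette struct{ accent color }

	func main() {
		spew.Dump(palette{accent: color{r: 0xde, g: 0xad, b: 0xbe}})
	}
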
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, is being compiled by
+// GopherJS, or "-tags safe" is added to the go build command line.  The
+// "disableunsafe" tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue normally converts the passed reflect.Value into one that
+// bypasses the typical safety restrictions preventing access to unaddressable
+// and unexported data.  However, doing this relies on access to the unsafe
+// package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value; however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
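
When map keys are not simply sortable, newValuesSorter above falls back to
surrogate keys built from the keys' error/Stringer output (or, with SpewKeys,
from spewing the keys).  A minimal sketch of that path, assuming the vendored
package is importable under its canonical path:

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	// day is a struct kind, so canSortSimply rejects it, but it implements
	// fmt.Stringer, so the sorter can build surrogate string keys from it.
	type day struct{ n int }

	func (d day) String() string { return fmt.Sprintf("day-%02d", d.n) }

	func main() {
		cfg := spew.ConfigState{Indent: " ", SortKeys: true}
		// Keys sort on "day-01" < "day-02" < "day-03", so the dump is stable.
		cfg.Dump(map[day]int{{3}: 8, {1}: 6, {2}: 7})
	}
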
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use sets this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value; however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Fprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
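
A short sketch of the ConfigState plumbing above in use (the type and values
here are illustrative, not part of voltctl): a dedicated instance with pointer
addresses and capacities disabled yields byte-for-byte comparable dumps, the
test-diffing case the field comments mention.

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	type server struct {
		Name  string
		Ports []int
	}

	func main() {
		// NewDefaultConfig keeps these settings out of the global spew.Config.
		cfg := spew.NewDefaultConfig()
		cfg.DisablePointerAddresses = true
		cfg.DisableCapacities = true

		a := cfg.Sdump(&server{Name: "olt-1", Ports: []int{1, 2, 3}})
		b := cfg.Sdump(&server{Name: "olt-1", Ports: []int{1, 2, 3}})
		fmt.Println(a == b) // true: no addresses or capacities in the output
	}
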
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the exported spew.Config variable.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed in the style of the
+hexdump -C command, as shown below.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
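
To complement the doc examples above, which go through the spew convenience
wrappers, here is a sketch (hypothetical caller code, not part of this import)
of handing a spew Formatter straight to the standard fmt package:

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		cfg := spew.NewDefaultConfig()
		v := map[string][]int{"ports": {1, 2}}

		// The value returned by NewFormatter satisfies fmt.Formatter, so the
		// %v/%+v/%#v/%#+v verbs pick up spew's formatting, while verbs it does
		// not handle, such as %q, pass through to fmt unchanged.
		fmt.Printf("%#v\n", cfg.NewFormatter(v))
	}
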
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer
+	depth            int
+	pointers         map[uintptr]int
+	ignoreNextType   bool
+	ignoreNextIndent bool
+	cs               *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		d := dumpState{w: w, cs: cs}
+		d.pointers = make(map[uintptr]int)
+		d.dump(reflect.ValueOf(arg))
+		d.w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	fdump(&Config, os.Stdout, a...)
+}
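
A small sketch of the hexdump path implemented in dumpSlice above (again
hypothetical caller code): byte slices come out in hexdump -C style, and Fdump
lets that output be captured in any io.Writer.

	package main

	import (
		"bytes"
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		var buf bytes.Buffer
		// []byte triggers the hexdump -C style rendering in dumpSlice.
		spew.Fdump(&buf, []byte("voltctl"))
		fmt.Print(buf.String())
	}
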
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}
+	fs             fmt.State
+	depth          int
+	pointers       map[uintptr]int
+	ignoreNextType bool
+	cs             *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface {
+		f.ignoreNextType = false
+		if !v.IsNil() {
+			v = v.Elem()
+		}
+	}
+	return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function;
+// however, circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	fs := &formatState{value: v, cs: cs}
+	fs.pointers = make(map[uintptr]int)
+	return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(&Config, v)
+}
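+
+// A minimal usage sketch (the map value here is illustrative only):
+//
+//	v := map[string]int{"answer": 42}
+//	fmt.Printf("%+v\n", spew.NewFormatter(v))
+//	// or, equivalently, via the convenience wrappers:
+//	spew.Printf("%+v\n", v)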
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = NewFormatter(arg)
+	}
+	return formatters
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/.gitignore b/vendor/github.com/fullstorydev/grpcurl/.gitignore
new file mode 100644
index 0000000..849ddff
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.gitignore
@@ -0,0 +1 @@
+dist/
diff --git a/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
new file mode 100644
index 0000000..e7bfd3e
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.goreleaser.yml
@@ -0,0 +1,24 @@
+builds:
+  - binary: grpcurl
+    main: ./cmd/grpcurl
+    goos:
+      - linux
+      - darwin
+      - windows
+    goarch:
+      - amd64
+      - 386
+    ldflags:
+      - -s -w -X main.version=v{{.Version}}
+
+archive:
+  format: tar.gz
+  format_overrides:
+    - goos: windows
+      format: zip
+  replacements:
+    amd64: x86_64
+    386: x86_32
+    darwin: osx
+  files:
+    - LICENSE
diff --git a/vendor/github.com/fullstorydev/grpcurl/.travis.yml b/vendor/github.com/fullstorydev/grpcurl/.travis.yml
new file mode 100644
index 0000000..4b0d5eb
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/.travis.yml
@@ -0,0 +1,21 @@
+language: go
+sudo: false
+
+matrix:
+  include:
+    - go: "1.9"
+    - go: "1.10"
+    - go: "1.11"
+      env:
+      - GO111MODULE=off
+      - VET=1
+    - go: "1.11"
+      env: GO111MODULE=on
+    - go: "1.12"
+      env: GO111MODULE=off
+    - go: "1.12"
+      env: GO111MODULE=on
+    - go: tip
+
+script:
+  - if [[ "$VET" = 1 ]]; then make ci; else make deps test; fi
diff --git a/vendor/github.com/fullstorydev/grpcurl/LICENSE b/vendor/github.com/fullstorydev/grpcurl/LICENSE
new file mode 100644
index 0000000..6b678c5
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 FullStory, Inc
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/fullstorydev/grpcurl/Makefile b/vendor/github.com/fullstorydev/grpcurl/Makefile
new file mode 100644
index 0000000..982d043
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/Makefile
@@ -0,0 +1,76 @@
+dev_build_version=$(shell git describe --tags --always --dirty)
+
+# TODO: run golint and errcheck, but only to catch *new* violations and
+# decide whether to change code or not (e.g. we need to be able to whitelist
+# violations already in the code). They can be useful to catch errors, but
+# they are just too noisy to be a requirement for a CI -- we don't even *want*
+# to fix some of the things they consider to be violations.
+.PHONY: ci
+ci: deps checkgofmt vet staticcheck ineffassign predeclared test
+
+.PHONY: deps
+deps:
+	go get -d -v -t ./...
+
+.PHONY: updatedeps
+updatedeps:
+	go get -d -v -t -u -f ./...
+
+.PHONY: install
+install:
+	go install -ldflags '-X "main.version=dev build $(dev_build_version)"' ./...
+
+.PHONY: release
+release:
+	@GO111MODULE=off go get github.com/goreleaser/goreleaser
+	goreleaser --rm-dist
+
+.PHONY: checkgofmt
+checkgofmt:
+	gofmt -s -l .
+	@if [ -n "$$(gofmt -s -l .)" ]; then \
+		exit 1; \
+	fi
+
+.PHONY: vet
+vet:
+	go vet ./...
+
+# TODO: remove the ignored check; need it for now because it
+# is complaining about a deprecated comment added to grpc,
+# but it's not yet released. Once the new (non-deprecated)
+# API is included in a release, we can move to that new
+# version and fix the call site to no longer use deprecated
+# method.
+# This all works fine with Go modules, but without modules,
+# CI is just getting latest master for dependencies like grpc.
+.PHONY: staticcheck
+staticcheck:
+	@go get honnef.co/go/tools/cmd/staticcheck
+	staticcheck ./...
+
+.PHONY: ineffassign
+ineffassign:
+	@go get github.com/gordonklaus/ineffassign
+	ineffassign .
+
+.PHONY: predeclared
+predeclared:
+	@go get github.com/nishanths/predeclared
+	predeclared .
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: golint
+golint:
+	@go get golang.org/x/lint/golint
+	golint -min_confidence 0.9 -set_exit_status ./...
+
+# Intentionally omitted from CI, but target here for ad-hoc reports.
+.PHONY: errcheck
+errcheck:
+	@go get github.com/kisielk/errcheck
+	errcheck ./...
+
+.PHONY: test
+test:
+	go test -race ./...
diff --git a/vendor/github.com/fullstorydev/grpcurl/README.md b/vendor/github.com/fullstorydev/grpcurl/README.md
new file mode 100644
index 0000000..1713f2f
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/README.md
@@ -0,0 +1,214 @@
+# gRPCurl
+[![Build Status](https://travis-ci.org/fullstorydev/grpcurl.svg?branch=master)](https://travis-ci.org/fullstorydev/grpcurl/branches)
+[![Go Report Card](https://goreportcard.com/badge/github.com/fullstorydev/grpcurl)](https://goreportcard.com/report/github.com/fullstorydev/grpcurl)
+
+`grpcurl` is a command-line tool that lets you interact with gRPC servers. It's
+basically `curl` for gRPC servers.
+
+The main purpose for this tool is to invoke RPC methods on a gRPC server from the
+command-line. gRPC servers use a binary encoding on the wire
+([protocol buffers](https://developers.google.com/protocol-buffers/), or "protobufs"
+for short). So they are basically impossible to interact with using regular `curl`
+(and older versions of `curl` that do not support HTTP/2 are of course non-starters).
+This program accepts messages using JSON encoding, which is much more friendly for both
+humans and scripts.
+
+With this tool you can also browse the schema for gRPC services, either by querying
+a server that supports [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto),
+by reading proto source files, or by loading in compiled "protoset" files (files that contain
+encoded file [descriptor protos](https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto)).
+In fact, the tool transforms JSON request data into a binary-encoded protobuf
+using that very same schema. So, if the server you interact with does not support
+reflection, you will either need the proto source files that define the service or need
+protoset files that `grpcurl` can use.
+
+This repo also provides a library package, `github.com/fullstorydev/grpcurl`, that has
+functions for simplifying the construction of other command-line tools that dynamically
+invoke gRPC endpoints. This code is a great example of how to use the various packages of
+the [protoreflect](https://godoc.org/github.com/jhump/protoreflect) library, and shows
+off what they can do.
+
+See also the [`grpcurl` talk at GopherCon 2018](https://www.youtube.com/watch?v=dDr-8kbMnaw).
+
+## Features
+`grpcurl` supports all kinds of RPC methods, including streaming methods. You can even
+operate bi-directional streaming methods interactively by running `grpcurl` from an
+interactive terminal and using stdin as the request body!
+
+`grpcurl` supports both plain-text and TLS servers and has numerous options for TLS
+configuration. It also supports mutual TLS, where the client is required to present a
+client certificate.
+
+As mentioned above, `grpcurl` works seamlessly if the server supports the reflection
+service. If not, you can supply the `.proto` source files or you can supply protoset
+files (containing compiled descriptors, produced by `protoc`) to `grpcurl`.
+
+## Installation
+
+### Binaries
+
+Download the binary from the [releases](https://github.com/fullstorydev/grpcurl/releases) page.
+
+On macOS, `grpcurl` is available via Homebrew:
+```shell
+brew install grpcurl
+```
+
+### From Source
+You can use the `go` tool to install `grpcurl`:
+```shell
+go get github.com/fullstorydev/grpcurl
+go install github.com/fullstorydev/grpcurl/cmd/grpcurl
+```
+
+This installs the command into the `bin` sub-folder of wherever your `$GOPATH`
+environment variable points. If this directory is already in your `$PATH`, then
+you should be good to go.
+
+If you have already pulled down this repo to a location that is not in your
+`$GOPATH` and want to build from the sources, you can `cd` into the repo and then
+run `make install`.
+
+If you encounter compile errors, you could have outdated versions of `grpcurl`'s
+dependencies. You can update the dependencies by running `make updatedeps`. You can
+also use [`vgo`](https://github.com/golang/vgo) to install, which will use the right
+versions of dependencies. Or, if you are using Go 1.11, you can add `GO111MODULE=on`
+as a prefix to the commands above, which will also build using the right versions of
+dependencies (vs. whatever you may already have in your `GOPATH`).
+
+## Usage
+The usage doc for the tool explains the numerous options:
+```shell
+grpcurl -help
+```
+
+In the sections below, you will find numerous examples demonstrating how to use
+`grpcurl`.
+
+### Invoking RPCs
+Invoking an RPC on a trusted server (e.g. TLS without self-signed key or custom CA)
+that requires no client certs and supports server reflection is the simplest thing to
+do with `grpcurl`. This minimal invocation sends an empty request body:
+```shell
+grpcurl grpc.server.com:443 my.custom.server.Service/Method
+```
+
+To send a non-empty request, use the `-d` argument. Note that all arguments must come
+*before* the server address and method name:
+```shell
+grpcurl -d '{"id": 1234, "tags": ["foo","bar"]}' \
+    grpc.server.com:443 my.custom.server.Service/Method
+```
+
+As can be seen in the example, the supplied body must be in JSON format. The body will
+be parsed and then transmitted to the server in the protobuf binary format.
+
+If you want to include `grpcurl` in a command pipeline, such as when using `jq` to
+create a request body, you can use `-d @`, which tells `grpcurl` to read the actual
+request body from stdin:
+```shell
+grpcurl -d @ grpc.server.com:443 my.custom.server.Service/Method <<EOM
+{
+  "id": 1234,
+  "tags": [
+    "foo",
+    "bar"
+  ]
+}
+EOM
+```
+
+### Listing Services
+To list all services exposed by a server, use the "list" verb. When using `.proto` source
+or protoset files instead of server reflection, this lists all services defined in the
+source or protoset files.
+```shell
+# Server supports reflection
+grpcurl localhost:8787 list
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin list
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto list
+```
+
+The "list" verb also lets you see all methods in a particular service:
+```shell
+grpcurl localhost:8787 list my.custom.server.Service
+```
+
+### Describing Elements
+The "describe" verb will print the type of any symbol that the server knows about
+or that is found in a given protoset file. It also prints a description of that
+symbol, in the form of snippets of proto source. It won't necessarily be the
+original source that defined the element, but it will be equivalent.
+
+```shell
+# Server supports reflection
+grpcurl localhost:8787 describe my.custom.server.Service.MethodOne
+
+# Using compiled protoset files
+grpcurl -protoset my-protos.bin describe my.custom.server.Service.MethodOne
+
+# Using proto sources
+grpcurl -import-path ../protos -proto my-stuff.proto describe my.custom.server.Service.MethodOne
+```
+
+## Descriptor Sources
+The `grpcurl` tool can operate on a variety of sources for descriptors. The descriptors
+are required in order for `grpcurl` to understand the RPC schema, translate inputs
+into the protobuf binary format, and translate responses from the binary format back
+into text. The sections below document the supported sources and what command-line flags
+are needed to use them.
+
+### Server Reflection
+
+Without any additional command-line flags, `grpcurl` will try to use [server reflection](https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto).
+
+Examples for how to set up server reflection can be found [here](https://github.com/grpc/grpc/blob/master/doc/server-reflection.md#known-implementations).
+
+When using reflection, the server address (host:port or path to Unix socket) is required
+even for "list" and "describe" operations, so that `grpcurl` can connect to the server
+and ask it for its descriptors.
+
+### Proto Source Files
+To use `grpcurl` on servers that do not support reflection, you can use `.proto` source
+files.
+
+In addition to using `-proto` flags to point `grpcurl` at the relevant proto source file(s),
+you may also need to supply `-import-path` flags to tell `grpcurl` the folders from which
+dependencies can be imported.
+
+Just like when compiling with `protoc`, you do *not* need to provide an import path for the
+location of the standard protos included with `protoc` (which contain various "well-known
+types" with a package definition of `google.protobuf`). These files are "known" by `grpcurl`
+as a snapshot of their descriptors is built into the `grpcurl` binary.
+
+When using proto sources, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the proto
+source files.
+
+### Protoset Files
+You can also use compiled protoset files with `grpcurl`. If you are scripting `grpcurl` and
+need to re-use the same proto sources for many invocations, you will see better performance
+by using protoset files (since it skips the parsing and compilation steps with each
+invocation).
+
+Protoset files contain binary encoded `google.protobuf.FileDescriptorSet` protos. To create
+a protoset file, invoke `protoc` with the `*.proto` files that define the service:
+```shell
+protoc --proto_path=. \
+    --descriptor_set_out=myservice.protoset \
+    --include_imports \
+    my/custom/server/service.proto
+```
+
+The `--descriptor_set_out` argument is what tells `protoc` to produce a protoset,
+and the `--include_imports` argument is necessary for the protoset to contain
+everything that `grpcurl` needs to process and understand the schema.
+
+When using protosets, you can omit the server address (host:port or path to Unix socket)
+when using the "list" and "describe" operations since they only need to consult the
+protoset files.
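+
+For example, once `myservice.protoset` has been generated as shown above, it can
+be used for offline inspection (the service name below is a placeholder):
+```shell
+grpcurl -protoset myservice.protoset describe my.custom.server.Service
+```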
+
diff --git a/vendor/github.com/fullstorydev/grpcurl/desc_source.go b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
new file mode 100644
index 0000000..c23ae3d
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/desc_source.go
@@ -0,0 +1,253 @@
+package grpcurl
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/protoparse"
+	"github.com/jhump/protoreflect/dynamic"
+	"github.com/jhump/protoreflect/grpcreflect"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ErrReflectionNotSupported is returned by DescriptorSource operations that
+// rely on interacting with the reflection service when the source does not
+// actually expose the reflection service. When this occurs, an alternate source
+// (like file descriptor sets) must be used.
+var ErrReflectionNotSupported = errors.New("server does not support the reflection API")
+
+// DescriptorSource is a source of protobuf descriptor information. It can be backed by a FileDescriptorSet
+// proto (like a file generated by protoc) or a remote server that supports the reflection API.
+type DescriptorSource interface {
+	// ListServices returns a list of fully-qualified service names. It will be all services in a set of
+	// descriptor files or the set of all services exposed by a gRPC server.
+	ListServices() ([]string, error)
+	// FindSymbol returns a descriptor for the given fully-qualified symbol name.
+	FindSymbol(fullyQualifiedName string) (desc.Descriptor, error)
+	// AllExtensionsForType returns all known extension fields that extend the given message type name.
+	AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error)
+}
+
+// DescriptorSourceFromProtoSets creates a DescriptorSource that is backed by the named files, whose contents
+// are encoded FileDescriptorSet protos.
+func DescriptorSourceFromProtoSets(fileNames ...string) (DescriptorSource, error) {
+	files := &descpb.FileDescriptorSet{}
+	for _, fileName := range fileNames {
+		b, err := ioutil.ReadFile(fileName)
+		if err != nil {
+			return nil, fmt.Errorf("could not load protoset file %q: %v", fileName, err)
+		}
+		var fs descpb.FileDescriptorSet
+		err = proto.Unmarshal(b, &fs)
+		if err != nil {
+			return nil, fmt.Errorf("could not parse contents of protoset file %q: %v", fileName, err)
+		}
+		files.File = append(files.File, fs.File...)
+	}
+	return DescriptorSourceFromFileDescriptorSet(files)
+}
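+
+// A minimal usage sketch (the protoset file name is illustrative only):
+//
+//	src, err := grpcurl.DescriptorSourceFromProtoSets("myservice.protoset")
+//	if err != nil {
+//		// handle the error
+//	}
+//	svcs, err := src.ListServices()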
+
+// DescriptorSourceFromProtoFiles creates a DescriptorSource that is backed by the named files,
+// whose contents are Protocol Buffer source files. The given importPaths are used to locate
+// any imported files.
+func DescriptorSourceFromProtoFiles(importPaths []string, fileNames ...string) (DescriptorSource, error) {
+	fileNames, err := protoparse.ResolveFilenames(importPaths, fileNames...)
+	if err != nil {
+		return nil, err
+	}
+	p := protoparse.Parser{
+		ImportPaths:           importPaths,
+		InferImportPaths:      len(importPaths) == 0,
+		IncludeSourceCodeInfo: true,
+	}
+	fds, err := p.ParseFiles(fileNames...)
+	if err != nil {
+		return nil, fmt.Errorf("could not parse given files: %v", err)
+	}
+	return DescriptorSourceFromFileDescriptors(fds...)
+}
+
+// DescriptorSourceFromFileDescriptorSet creates a DescriptorSource that is backed by the FileDescriptorSet.
+func DescriptorSourceFromFileDescriptorSet(files *descpb.FileDescriptorSet) (DescriptorSource, error) {
+	unresolved := map[string]*descpb.FileDescriptorProto{}
+	for _, fd := range files.File {
+		unresolved[fd.GetName()] = fd
+	}
+	resolved := map[string]*desc.FileDescriptor{}
+	for _, fd := range files.File {
+		_, err := resolveFileDescriptor(unresolved, resolved, fd.GetName())
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &fileSource{files: resolved}, nil
+}
+
+func resolveFileDescriptor(unresolved map[string]*descpb.FileDescriptorProto, resolved map[string]*desc.FileDescriptor, filename string) (*desc.FileDescriptor, error) {
+	if r, ok := resolved[filename]; ok {
+		return r, nil
+	}
+	fd, ok := unresolved[filename]
+	if !ok {
+		return nil, fmt.Errorf("no descriptor found for %q", filename)
+	}
+	deps := make([]*desc.FileDescriptor, 0, len(fd.GetDependency()))
+	for _, dep := range fd.GetDependency() {
+		depFd, err := resolveFileDescriptor(unresolved, resolved, dep)
+		if err != nil {
+			return nil, err
+		}
+		deps = append(deps, depFd)
+	}
+	result, err := desc.CreateFileDescriptor(fd, deps...)
+	if err != nil {
+		return nil, err
+	}
+	resolved[filename] = result
+	return result, nil
+}
+
+// DescriptorSourceFromFileDescriptors creates a DescriptorSource that is backed by the given
+// file descriptors.
+func DescriptorSourceFromFileDescriptors(files ...*desc.FileDescriptor) (DescriptorSource, error) {
+	fds := map[string]*desc.FileDescriptor{}
+	for _, fd := range files {
+		if err := addFile(fd, fds); err != nil {
+			return nil, err
+		}
+	}
+	return &fileSource{files: fds}, nil
+}
+
+func addFile(fd *desc.FileDescriptor, fds map[string]*desc.FileDescriptor) error {
+	name := fd.GetName()
+	if existing, ok := fds[name]; ok {
+		// already added this file
+		if existing != fd {
+			// doh! duplicate files provided
+			return fmt.Errorf("given files include multiple copies of %q", name)
+		}
+		return nil
+	}
+	fds[name] = fd
+	for _, dep := range fd.GetDependencies() {
+		if err := addFile(dep, fds); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+type fileSource struct {
+	files  map[string]*desc.FileDescriptor
+	er     *dynamic.ExtensionRegistry
+	erInit sync.Once
+}
+
+func (fs *fileSource) ListServices() ([]string, error) {
+	set := map[string]bool{}
+	for _, fd := range fs.files {
+		for _, svc := range fd.GetServices() {
+			set[svc.GetFullyQualifiedName()] = true
+		}
+	}
+	sl := make([]string, 0, len(set))
+	for svc := range set {
+		sl = append(sl, svc)
+	}
+	return sl, nil
+}
+
+// GetAllFiles returns all of the underlying file descriptors. This is
+// more thorough and more efficient than the fallback strategy used by
+// the GetAllFiles package method, for enumerating all files from a
+// descriptor source.
+func (fs *fileSource) GetAllFiles() ([]*desc.FileDescriptor, error) {
+	files := make([]*desc.FileDescriptor, len(fs.files))
+	i := 0
+	for _, fd := range fs.files {
+		files[i] = fd
+		i++
+	}
+	return files, nil
+}
+
+func (fs *fileSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+	for _, fd := range fs.files {
+		if dsc := fd.FindSymbol(fullyQualifiedName); dsc != nil {
+			return dsc, nil
+		}
+	}
+	return nil, notFound("Symbol", fullyQualifiedName)
+}
+
+func (fs *fileSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+	fs.erInit.Do(func() {
+		fs.er = &dynamic.ExtensionRegistry{}
+		for _, fd := range fs.files {
+			fs.er.AddExtensionsFromFile(fd)
+		}
+	})
+	return fs.er.AllExtensionsForType(typeName), nil
+}
+
+// DescriptorSourceFromServer creates a DescriptorSource that uses the given gRPC reflection client
+// to interrogate a server for descriptor information. If the server does not support the reflection
+// API, then the various DescriptorSource methods will return ErrReflectionNotSupported.
+func DescriptorSourceFromServer(_ context.Context, refClient *grpcreflect.Client) DescriptorSource {
+	return serverSource{client: refClient}
+}
+
+type serverSource struct {
+	client *grpcreflect.Client
+}
+
+func (ss serverSource) ListServices() ([]string, error) {
+	svcs, err := ss.client.ListServices()
+	return svcs, reflectionSupport(err)
+}
+
+func (ss serverSource) FindSymbol(fullyQualifiedName string) (desc.Descriptor, error) {
+	file, err := ss.client.FileContainingSymbol(fullyQualifiedName)
+	if err != nil {
+		return nil, reflectionSupport(err)
+	}
+	d := file.FindSymbol(fullyQualifiedName)
+	if d == nil {
+		return nil, notFound("Symbol", fullyQualifiedName)
+	}
+	return d, nil
+}
+
+func (ss serverSource) AllExtensionsForType(typeName string) ([]*desc.FieldDescriptor, error) {
+	var exts []*desc.FieldDescriptor
+	nums, err := ss.client.AllExtensionNumbersForType(typeName)
+	if err != nil {
+		return nil, reflectionSupport(err)
+	}
+	for _, fieldNum := range nums {
+		ext, err := ss.client.ResolveExtension(typeName, fieldNum)
+		if err != nil {
+			return nil, reflectionSupport(err)
+		}
+		exts = append(exts, ext)
+	}
+	return exts, nil
+}
+
+func reflectionSupport(err error) error {
+	if err == nil {
+		return nil
+	}
+	if stat, ok := status.FromError(err); ok && stat.Code() == codes.Unimplemented {
+		return ErrReflectionNotSupported
+	}
+	return err
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/format.go b/vendor/github.com/fullstorydev/grpcurl/format.go
new file mode 100644
index 0000000..db93eb4
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/format.go
@@ -0,0 +1,469 @@
+package grpcurl
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/dynamic"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// RequestParser processes input into messages.
+type RequestParser interface {
+	// Next parses input data into the given request message. If called after
+	// input is exhausted, it returns io.EOF. If the caller re-uses the same
+	// instance in multiple calls to Next, it should call msg.Reset() in between
+	// each call.
+	Next(msg proto.Message) error
+	// NumRequests returns the number of messages that have been parsed and
+	// returned by a call to Next.
+	NumRequests() int
+}
+
+type jsonRequestParser struct {
+	dec          *json.Decoder
+	unmarshaler  jsonpb.Unmarshaler
+	requestCount int
+}
+
+// NewJSONRequestParser returns a RequestParser that reads data in JSON format
+// from the given reader. The given resolver is used to assist with decoding of
+// google.protobuf.Any messages.
+//
+// Input data that contains more than one message should just include all
+// messages concatenated (though whitespace is necessary to separate some kinds
+// of values in JSON).
+//
+// If the given reader has no data, the returned parser will return io.EOF on
+// the very first call.
+func NewJSONRequestParser(in io.Reader, resolver jsonpb.AnyResolver) RequestParser {
+	return &jsonRequestParser{
+		dec:         json.NewDecoder(in),
+		unmarshaler: jsonpb.Unmarshaler{AnyResolver: resolver},
+	}
+}
+
+func (f *jsonRequestParser) Next(m proto.Message) error {
+	var msg json.RawMessage
+	if err := f.dec.Decode(&msg); err != nil {
+		return err
+	}
+	f.requestCount++
+	return f.unmarshaler.Unmarshal(bytes.NewReader(msg), m)
+}
+
+func (f *jsonRequestParser) NumRequests() int {
+	return f.requestCount
+}
+
+const (
+	textSeparatorChar = 0x1e
+)
+
+type textRequestParser struct {
+	r            *bufio.Reader
+	err          error
+	requestCount int
+}
+
+// NewTextRequestParser returns a RequestParser that reads data in the protobuf
+// text format from the given reader.
+//
+// Input data that contains more than one message should include an ASCII
+// 'Record Separator' character (0x1E) between each message.
+//
+// Empty text is a valid text format and represents an empty message. So if the
+// given reader has no data, the returned parser will yield an empty message
+// for the first call to Next and then return io.EOF thereafter. This also means
+// that if the input data ends with a record separator, then a final empty
+// message will be parsed *after* the separator.
+func NewTextRequestParser(in io.Reader) RequestParser {
+	return &textRequestParser{r: bufio.NewReader(in)}
+}
+
+func (f *textRequestParser) Next(m proto.Message) error {
+	if f.err != nil {
+		return f.err
+	}
+
+	var b []byte
+	b, f.err = f.r.ReadBytes(textSeparatorChar)
+	if f.err != nil && f.err != io.EOF {
+		return f.err
+	}
+	// remove delimiter
+	if len(b) > 0 && b[len(b)-1] == textSeparatorChar {
+		b = b[:len(b)-1]
+	}
+
+	f.requestCount++
+
+	return proto.UnmarshalText(string(b), m)
+}
+
+func (f *textRequestParser) NumRequests() int {
+	return f.requestCount
+}
+
+// Formatter translates messages into string representations.
+type Formatter func(proto.Message) (string, error)
+
+// NewJSONFormatter returns a formatter that returns JSON strings. The JSON will
+// include empty/default values (instead of just omitting them) if emitDefaults
+// is true. The given resolver is used to assist with encoding of
+// google.protobuf.Any messages.
+func NewJSONFormatter(emitDefaults bool, resolver jsonpb.AnyResolver) Formatter {
+	marshaler := jsonpb.Marshaler{
+		EmitDefaults: emitDefaults,
+		Indent:       "  ",
+		AnyResolver:  resolver,
+	}
+	return marshaler.MarshalToString
+}
+
+// NewTextFormatter returns a formatter that returns strings in the protobuf
+// text format. If includeSeparator is true then, when invoked to format
+// multiple messages, all messages after the first one will be prefixed with the
+// ASCII 'Record Separator' character (0x1E).
+func NewTextFormatter(includeSeparator bool) Formatter {
+	tf := textFormatter{useSeparator: includeSeparator}
+	return tf.format
+}
+
+type textFormatter struct {
+	useSeparator bool
+	numFormatted int
+}
+
+var protoTextMarshaler = proto.TextMarshaler{ExpandAny: true}
+
+func (tf *textFormatter) format(m proto.Message) (string, error) {
+	var buf bytes.Buffer
+	if tf.useSeparator && tf.numFormatted > 0 {
+		if err := buf.WriteByte(textSeparatorChar); err != nil {
+			return "", err
+		}
+	}
+
+	// If the message implements a MarshalText method (such as a *dynamic.Message),
+	// it won't get details about whether or not to format to text compactly
+	// or with indentation. So first see if the message also implements a
+	// MarshalTextIndent method and use that instead if available.
+	type indentMarshaler interface {
+		MarshalTextIndent() ([]byte, error)
+	}
+
+	if indenter, ok := m.(indentMarshaler); ok {
+		b, err := indenter.MarshalTextIndent()
+		if err != nil {
+			return "", err
+		}
+		if _, err := buf.Write(b); err != nil {
+			return "", err
+		}
+	} else if err := protoTextMarshaler.Marshal(&buf, m); err != nil {
+		return "", err
+	}
+
+	// no trailing newline needed
+	str := buf.String()
+	if str[len(str)-1] == '\n' {
+		str = str[:len(str)-1]
+	}
+
+	tf.numFormatted++
+
+	return str, nil
+}
+
+type Format string
+
+const (
+	FormatJSON = Format("json")
+	FormatText = Format("text")
+)
+
+// AnyResolverFromDescriptorSource returns an AnyResolver that will search for
+// types using the given descriptor source.
+func AnyResolverFromDescriptorSource(source DescriptorSource) jsonpb.AnyResolver {
+	return &anyResolver{source: source}
+}
+
+// AnyResolverFromDescriptorSourceWithFallback returns an AnyResolver that will
+// search for types using the given descriptor source and then fall back to a
+// special message if the type is not found. The fallback type will render to
+// JSON with a "@type" property, just like an Any message, but also with a
+// custom "@value" property that includes the binary encoded payload.
+func AnyResolverFromDescriptorSourceWithFallback(source DescriptorSource) jsonpb.AnyResolver {
+	res := anyResolver{source: source}
+	return &anyResolverWithFallback{AnyResolver: &res}
+}
+
+type anyResolver struct {
+	source DescriptorSource
+
+	er dynamic.ExtensionRegistry
+
+	mu       sync.RWMutex
+	mf       *dynamic.MessageFactory
+	resolved map[string]func() proto.Message
+}
+
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+
+	r.mu.RLock()
+	factory := r.resolved[mname]
+	r.mu.RUnlock()
+
+	// already resolved?
+	if factory != nil {
+		return factory(), nil
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+
+	// double-check, in case we were racing with another goroutine
+	// that resolved this one
+	factory = r.resolved[mname]
+	if factory != nil {
+		return factory(), nil
+	}
+
+	// use descriptor source to resolve message type
+	d, err := r.source.FindSymbol(mname)
+	if err != nil {
+		return nil, err
+	}
+	md, ok := d.(*desc.MessageDescriptor)
+	if !ok {
+		return nil, fmt.Errorf("unknown message: %s", typeUrl)
+	}
+	// populate any extensions for this message, too
+	if exts, err := r.source.AllExtensionsForType(mname); err != nil {
+		return nil, err
+	} else if err := r.er.AddExtension(exts...); err != nil {
+		return nil, err
+	}
+
+	if r.mf == nil {
+		r.mf = dynamic.NewMessageFactoryWithExtensionRegistry(&r.er)
+	}
+
+	factory = func() proto.Message {
+		return r.mf.NewMessage(md)
+	}
+	if r.resolved == nil {
+		r.resolved = map[string]func() proto.Message{}
+	}
+	r.resolved[mname] = factory
+	return factory(), nil
+}
+
+// anyResolverWithFallback can provide a fallback value for unknown
+// messages that will format itself to JSON using an "@value" field
+// that has the base64-encoded data for the unknown message value.
+type anyResolverWithFallback struct {
+	jsonpb.AnyResolver
+}
+
+func (r anyResolverWithFallback) Resolve(typeUrl string) (proto.Message, error) {
+	msg, err := r.AnyResolver.Resolve(typeUrl)
+	if err == nil {
+		return msg, err
+	}
+
+	// Try "default" resolution logic. This mirrors the default behavior
+	// of jsonpb, which checks to see if the given message name is registered
+	// in the proto package.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt != nil {
+		return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+	}
+
+	// finally, fall back to a special placeholder that can marshal itself
+	// to JSON using a special "@value" property to show base64-encoded
+	// data for the embedded message
+	return &unknownAny{TypeUrl: typeUrl, Error: fmt.Sprintf("%s is not recognized; see @value for raw binary message data", mname)}, nil
+}
+
+type unknownAny struct {
+	TypeUrl string `json:"@type"`
+	Error   string `json:"@error"`
+	Value   string `json:"@value"`
+}
+
+func (a *unknownAny) MarshalJSONPB(jsm *jsonpb.Marshaler) ([]byte, error) {
+	if jsm.Indent != "" {
+		return json.MarshalIndent(a, "", jsm.Indent)
+	}
+	return json.Marshal(a)
+}
+
+func (a *unknownAny) Unmarshal(b []byte) error {
+	a.Value = base64.StdEncoding.EncodeToString(b)
+	return nil
+}
+
+func (a *unknownAny) Reset() {
+	a.Value = ""
+}
+
+func (a *unknownAny) String() string {
+	b, err := a.MarshalJSONPB(&jsonpb.Marshaler{})
+	if err != nil {
+		return fmt.Sprintf("ERROR: %v", err.Error())
+	}
+	return string(b)
+}
+
+func (a *unknownAny) ProtoMessage() {
+}
+
+var _ proto.Message = (*unknownAny)(nil)
+
+// RequestParserAndFormatterFor returns a request parser and formatter for the
+// given format. The given descriptor source may be used for parsing message
+// data (if needed by the format). The flags emitJSONDefaultFields and
+// includeTextSeparator are options for JSON and protobuf text formats,
+// respectively. Requests will be parsed from the given in.
+func RequestParserAndFormatterFor(format Format, descSource DescriptorSource, emitJSONDefaultFields, includeTextSeparator bool, in io.Reader) (RequestParser, Formatter, error) {
+	switch format {
+	case FormatJSON:
+		resolver := AnyResolverFromDescriptorSource(descSource)
+		return NewJSONRequestParser(in, resolver), NewJSONFormatter(emitJSONDefaultFields, anyResolverWithFallback{AnyResolver: resolver}), nil
+	case FormatText:
+		return NewTextRequestParser(in), NewTextFormatter(includeTextSeparator), nil
+	default:
+		return nil, nil, fmt.Errorf("unknown format: %s", format)
+	}
+}
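+
+// A minimal usage sketch (descSource is assumed to come from one of the
+// DescriptorSource constructors, and os.Stdin is just one possible input):
+//
+//	rp, formatter, err := grpcurl.RequestParserAndFormatterFor(
+//		grpcurl.FormatJSON, descSource, false, false, os.Stdin)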
+
+// DefaultEventHandler logs events to a writer. This is not thread-safe, but is
+// safe for use with InvokeRPC as long as NumResponses and Status are not read
+// until the call to InvokeRPC completes.
+type DefaultEventHandler struct {
+	out        io.Writer
+	descSource DescriptorSource
+	formatter  func(proto.Message) (string, error)
+	verbose    bool
+
+	// NumResponses is the number of responses that have been received.
+	NumResponses int
+	// Status is the status that was received at the end of an RPC. It is
+	// nil if the RPC is still in progress.
+	Status *status.Status
+}
+
+// NewDefaultEventHandler returns an InvocationEventHandler that logs events to
+// the given output. If verbose is true, all events are logged. Otherwise, only
+// response messages are logged.
+func NewDefaultEventHandler(out io.Writer, descSource DescriptorSource, formatter Formatter, verbose bool) *DefaultEventHandler {
+	return &DefaultEventHandler{
+		out:        out,
+		descSource: descSource,
+		formatter:  formatter,
+		verbose:    verbose,
+	}
+}
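+
+// A minimal usage sketch (descSource and formatter are assumed to come from a
+// DescriptorSource constructor and RequestParserAndFormatterFor, respectively):
+//
+//	h := grpcurl.NewDefaultEventHandler(os.Stdout, descSource, formatter, false)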
+
+var _ InvocationEventHandler = (*DefaultEventHandler)(nil)
+
+func (h *DefaultEventHandler) OnResolveMethod(md *desc.MethodDescriptor) {
+	if h.verbose {
+		txt, err := GetDescriptorText(md, h.descSource)
+		if err == nil {
+			fmt.Fprintf(h.out, "\nResolved method descriptor:\n%s\n", txt)
+		}
+	}
+}
+
+func (h *DefaultEventHandler) OnSendHeaders(md metadata.MD) {
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nRequest metadata to send:\n%s\n", MetadataToString(md))
+	}
+}
+
+func (h *DefaultEventHandler) OnReceiveHeaders(md metadata.MD) {
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nResponse headers received:\n%s\n", MetadataToString(md))
+	}
+}
+
+func (h *DefaultEventHandler) OnReceiveResponse(resp proto.Message) {
+	h.NumResponses++
+	if h.verbose {
+		fmt.Fprint(h.out, "\nResponse contents:\n")
+	}
+	if respStr, err := h.formatter(resp); err != nil {
+		fmt.Fprintf(h.out, "Failed to format response message %d: %v\n", h.NumResponses, err)
+	} else {
+		fmt.Fprintln(h.out, respStr)
+	}
+}
+
+func (h *DefaultEventHandler) OnReceiveTrailers(stat *status.Status, md metadata.MD) {
+	h.Status = stat
+	if h.verbose {
+		fmt.Fprintf(h.out, "\nResponse trailers received:\n%s\n", MetadataToString(md))
+	}
+}
+
+// PrintStatus prints details about the given status to the given writer. The given
+// formatter is used to print any detail messages that may be included in the status.
+// If the given status has a code of OK, "OK" is printed and that is all. Otherwise,
+// "ERROR:" is printed along with a line showing the code, one showing the message
+// string, and each detail message if any are present. The detail messages will be
+// printed as proto text format or JSON, depending on the given formatter.
+func PrintStatus(w io.Writer, stat *status.Status, formatter Formatter) {
+	if stat.Code() == codes.OK {
+		fmt.Fprintln(w, "OK")
+		return
+	}
+	fmt.Fprintf(w, "ERROR:\n  Code: %s\n  Message: %s\n", stat.Code().String(), stat.Message())
+
+	statpb := stat.Proto()
+	if len(statpb.Details) > 0 {
+		fmt.Fprintf(w, "  Details:\n")
+		for i, det := range statpb.Details {
+			prefix := fmt.Sprintf("  %d)", i+1)
+			fmt.Fprintf(w, "%s\t", prefix)
+			prefix = strings.Repeat(" ", len(prefix)) + "\t"
+
+			output, err := formatter(det)
+			if err != nil {
+				fmt.Fprintf(w, "Error parsing detail message: %v\n", err)
+			} else {
+				lines := strings.Split(output, "\n")
+				for i, line := range lines {
+					if i == 0 {
+						// first line is already indented
+						fmt.Fprintf(w, "%s\n", line)
+					} else {
+						fmt.Fprintf(w, "%s%s\n", prefix, line)
+					}
+				}
+			}
+		}
+	}
+}
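+
+// A minimal usage sketch, printing the status recorded by a DefaultEventHandler
+// once the RPC has completed (h and formatter are assumed from the sketches
+// above):
+//
+//	grpcurl.PrintStatus(os.Stderr, h.Status, formatter)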
diff --git a/vendor/github.com/fullstorydev/grpcurl/go.mod b/vendor/github.com/fullstorydev/grpcurl/go.mod
new file mode 100644
index 0000000..f6af37d
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/go.mod
@@ -0,0 +1,8 @@
+module github.com/fullstorydev/grpcurl
+
+require (
+	github.com/golang/protobuf v1.3.1
+	github.com/jhump/protoreflect v1.4.1
+	golang.org/x/net v0.0.0-20190311183353-d8887717615a
+	google.golang.org/grpc v1.21.0
+)
diff --git a/vendor/github.com/fullstorydev/grpcurl/go.sum b/vendor/github.com/fullstorydev/grpcurl/go.sum
new file mode 100644
index 0000000..466dbb1
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/go.sum
@@ -0,0 +1,30 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/jhump/protoreflect v1.4.1 h1:tgahjuElRiJthp9JfaMUFxabBVIytT/lnMSadY5kMjM=
+github.com/jhump/protoreflect v1.4.1/go.mod h1:gZ3i/BeD62fjlaIL0VW4UDMT70CTX+3m4pOnAlJ0BX8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/genproto v0.0.0-20170818100345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.21.0 h1:G+97AoqBnmZIT91cLG/EkCoK9NSelj64P8bOHHNmGn0=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/vendor/github.com/fullstorydev/grpcurl/grpcurl.go b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
new file mode 100644
index 0000000..64947de
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/grpcurl.go
@@ -0,0 +1,622 @@
+// Package grpcurl provides the core functionality exposed by the grpcurl command, for
+// dynamically connecting to a server, using the reflection service to inspect the server,
+// and invoking RPCs. The grpcurl command-line tool constructs a DescriptorSource, based
+// on the command-line parameters, and supplies an InvocationEventHandler to supply request
+// data (which can come from command-line args or the process's stdin) and to log the
+// events (to the process's stdout).
+package grpcurl
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"github.com/golang/protobuf/ptypes"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/golang/protobuf/ptypes/struct"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/protoprint"
+	"github.com/jhump/protoreflect/dynamic"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials"
+	"google.golang.org/grpc/metadata"
+)
+
+// ListServices uses the given descriptor source to return a sorted list of fully-qualified
+// service names.
+func ListServices(source DescriptorSource) ([]string, error) {
+	svcs, err := source.ListServices()
+	if err != nil {
+		return nil, err
+	}
+	sort.Strings(svcs)
+	return svcs, nil
+}
+
+type sourceWithFiles interface {
+	GetAllFiles() ([]*desc.FileDescriptor, error)
+}
+
+var _ sourceWithFiles = (*fileSource)(nil)
+
+// GetAllFiles uses the given descriptor source to return a list of file descriptors.
+func GetAllFiles(source DescriptorSource) ([]*desc.FileDescriptor, error) {
+	var files []*desc.FileDescriptor
+	srcFiles, ok := source.(sourceWithFiles)
+
+	// If an error occurs, we still try to load as many files as we can, so that
+	// the caller can decide whether or not to ignore the error.
+	var firstError error
+	if ok {
+		files, firstError = srcFiles.GetAllFiles()
+	} else {
+		// Source does not implement GetAllFiles method, so use ListServices
+		// and grab files from there.
+		svcNames, err := source.ListServices()
+		if err != nil {
+			firstError = err
+		} else {
+			allFiles := map[string]*desc.FileDescriptor{}
+			for _, name := range svcNames {
+				d, err := source.FindSymbol(name)
+				if err != nil {
+					if firstError == nil {
+						firstError = err
+					}
+				} else {
+					addAllFilesToSet(d.GetFile(), allFiles)
+				}
+			}
+			files = make([]*desc.FileDescriptor, len(allFiles))
+			i := 0
+			for _, fd := range allFiles {
+				files[i] = fd
+				i++
+			}
+		}
+	}
+
+	sort.Sort(filesByName(files))
+	return files, firstError
+}
+
+type filesByName []*desc.FileDescriptor
+
+func (f filesByName) Len() int {
+	return len(f)
+}
+
+func (f filesByName) Less(i, j int) bool {
+	return f[i].GetName() < f[j].GetName()
+}
+
+func (f filesByName) Swap(i, j int) {
+	f[i], f[j] = f[j], f[i]
+}
+
+func addAllFilesToSet(fd *desc.FileDescriptor, all map[string]*desc.FileDescriptor) {
+	if _, ok := all[fd.GetName()]; ok {
+		// already added
+		return
+	}
+	all[fd.GetName()] = fd
+	for _, dep := range fd.GetDependencies() {
+		addAllFilesToSet(dep, all)
+	}
+}
+
+// ListMethods uses the given descriptor source to return a sorted list of method names
+// for the specified fully-qualified service name.
+func ListMethods(source DescriptorSource, serviceName string) ([]string, error) {
+	dsc, err := source.FindSymbol(serviceName)
+	if err != nil {
+		return nil, err
+	}
+	if sd, ok := dsc.(*desc.ServiceDescriptor); !ok {
+		return nil, notFound("Service", serviceName)
+	} else {
+		methods := make([]string, 0, len(sd.GetMethods()))
+		for _, method := range sd.GetMethods() {
+			methods = append(methods, method.GetFullyQualifiedName())
+		}
+		sort.Strings(methods)
+		return methods, nil
+	}
+}
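
A rough usage sketch for ListServices and ListMethods. It assumes DescriptorSourceFromProtoSets, a constructor that lives elsewhere in this package (desc_source.go, not shown in this file), and a hypothetical test.protoset file.

```go
package main

import (
	"fmt"
	"log"

	"github.com/fullstorydev/grpcurl"
)

func main() {
	// test.protoset is a hypothetical file produced by
	// `protoc --include_imports --descriptor_set_out=test.protoset ...`.
	source, err := grpcurl.DescriptorSourceFromProtoSets("test.protoset")
	if err != nil {
		log.Fatal(err)
	}

	svcs, err := grpcurl.ListServices(source)
	if err != nil {
		log.Fatal(err)
	}
	for _, svc := range svcs {
		methods, err := grpcurl.ListMethods(source, svc)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(svc, methods)
	}
}
```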
+
+// MetadataFromHeaders converts a list of header strings (each string in
+// "Header-Name: Header-Value" form) into metadata. If a string has a header
+// name without a value (i.e. it does not contain a colon), the value is assumed
+// to be blank. Binary headers (those whose names end in "-bin") should be
+// base64-encoded. But if they cannot be base64-decoded, they will be assumed to
+// be in raw form and used as is.
+func MetadataFromHeaders(headers []string) metadata.MD {
+	md := make(metadata.MD)
+	for _, part := range headers {
+		if part != "" {
+			pieces := strings.SplitN(part, ":", 2)
+			if len(pieces) == 1 {
+				pieces = append(pieces, "") // if no value was specified, just make it "" (maybe the header value doesn't matter)
+			}
+			headerName := strings.ToLower(strings.TrimSpace(pieces[0]))
+			val := strings.TrimSpace(pieces[1])
+			if strings.HasSuffix(headerName, "-bin") {
+				if v, err := decode(val); err == nil {
+					val = v
+				}
+			}
+			md[headerName] = append(md[headerName], val)
+		}
+	}
+	return md
+}
+
+var base64Codecs = []*base64.Encoding{base64.StdEncoding, base64.URLEncoding, base64.RawStdEncoding, base64.RawURLEncoding}
+
+func decode(val string) (string, error) {
+	var firstErr error
+	var b []byte
+	// we are lenient and can accept any of the flavors of base64 encoding
+	for _, d := range base64Codecs {
+		var err error
+		b, err = d.DecodeString(val)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+		return string(b), nil
+	}
+	return "", firstErr
+}
+
+// MetadataToString returns a string representation of the given metadata, for
+// displaying to users.
+func MetadataToString(md metadata.MD) string {
+	if len(md) == 0 {
+		return "(empty)"
+	}
+
+	keys := make([]string, 0, len(md))
+	for k := range md {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	var b bytes.Buffer
+	first := true
+	for _, k := range keys {
+		vs := md[k]
+		for _, v := range vs {
+			if first {
+				first = false
+			} else {
+				b.WriteString("\n")
+			}
+			b.WriteString(k)
+			b.WriteString(": ")
+			if strings.HasSuffix(k, "-bin") {
+				v = base64.StdEncoding.EncodeToString([]byte(v))
+			}
+			b.WriteString(v)
+		}
+	}
+	return b.String()
+}
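
A small sketch showing MetadataFromHeaders and MetadataToString together; the header names and values are made up for illustration.

```go
package main

import (
	"fmt"

	"github.com/fullstorydev/grpcurl"
)

func main() {
	// Names are lower-cased, a missing value becomes "", and "-bin" values
	// are base64-decoded when possible.
	md := grpcurl.MetadataFromHeaders([]string{
		"Authorization: Bearer abc123",
		"X-Trace-Bin: aGVsbG8=", // stored as raw bytes because of the -bin suffix
		"X-Flag",                // no colon: value is ""
	})

	// MetadataToString re-encodes -bin values as base64 for display.
	fmt.Println(grpcurl.MetadataToString(md))
}
```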
+
+var printer = &protoprint.Printer{
+	Compact:                  true,
+	OmitComments:             protoprint.CommentsNonDoc,
+	SortElements:             true,
+	ForceFullyQualifiedNames: true,
+}
+
+// GetDescriptorText returns a string representation of the given descriptor.
+// This returns a snippet of proto source that describes the given element.
+func GetDescriptorText(dsc desc.Descriptor, _ DescriptorSource) (string, error) {
+	// Note: DescriptorSource is not used, but remains an argument for backwards
+	// compatibility with the previous implementation.
+	txt, err := printer.PrintProtoToString(dsc)
+	if err != nil {
+		return "", err
+	}
+	// callers don't expect trailing newlines
+	if txt[len(txt)-1] == '\n' {
+		txt = txt[:len(txt)-1]
+	}
+	return txt, nil
+}
+
+// EnsureExtensions uses the given descriptor source to download extensions for
+// the given message. It returns a copy of the given message, but as a dynamic
+// message that knows about all extensions known to the given descriptor source.
+func EnsureExtensions(source DescriptorSource, msg proto.Message) proto.Message {
+	// load any server extensions so we can properly describe custom options
+	dsc, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return msg
+	}
+
+	var ext dynamic.ExtensionRegistry
+	if err = fetchAllExtensions(source, &ext, dsc, map[string]bool{}); err != nil {
+		return msg
+	}
+
+	// convert message into dynamic message that knows about applicable extensions
+	// (that way we can show meaningful info for custom options instead of printing as unknown)
+	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
+	dm, err := fullyConvertToDynamic(msgFactory, msg)
+	if err != nil {
+		return msg
+	}
+	return dm
+}
+
+// fetchAllExtensions recursively fetches extensions from the server for the given message type, as well as
+// for all message types of nested fields. The extensions are added to the given dynamic registry of extensions
+// so that all server-known extensions can be correctly parsed by grpcurl.
+func fetchAllExtensions(source DescriptorSource, ext *dynamic.ExtensionRegistry, md *desc.MessageDescriptor, alreadyFetched map[string]bool) error {
+	msgTypeName := md.GetFullyQualifiedName()
+	if alreadyFetched[msgTypeName] {
+		return nil
+	}
+	alreadyFetched[msgTypeName] = true
+	if len(md.GetExtensionRanges()) > 0 {
+		fds, err := source.AllExtensionsForType(msgTypeName)
+		if err != nil {
+			return fmt.Errorf("failed to query for extensions of type %s: %v", msgTypeName, err)
+		}
+		for _, fd := range fds {
+			if err := ext.AddExtension(fd); err != nil {
+				return fmt.Errorf("could not register extension %s of type %s: %v", fd.GetFullyQualifiedName(), msgTypeName, err)
+			}
+		}
+	}
+	// recursively fetch extensions for the types of any message fields
+	for _, fd := range md.GetFields() {
+		if fd.GetMessageType() != nil {
+			err := fetchAllExtensions(source, ext, fd.GetMessageType(), alreadyFetched)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// fullyConvertToDynamic attempts to convert the given message to a dynamic message as well
+// as any nested messages it may contain as field values. If the given message factory has
+// extensions registered that were not known when the given message was parsed, this effectively
+// allows re-parsing to identify those extensions.
+func fullyConvertToDynamic(msgFact *dynamic.MessageFactory, msg proto.Message) (proto.Message, error) {
+	if _, ok := msg.(*dynamic.Message); ok {
+		return msg, nil // already a dynamic message
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	newMsg := msgFact.NewMessage(md)
+	dm, ok := newMsg.(*dynamic.Message)
+	if !ok {
+		// if message factory didn't produce a dynamic message, then we should leave msg as is
+		return msg, nil
+	}
+
+	if err := dm.ConvertFrom(msg); err != nil {
+		return nil, err
+	}
+
+	// recursively convert all field values, too
+	for _, fd := range md.GetFields() {
+		if fd.IsMap() {
+			if fd.GetMapValueType().GetMessageType() != nil {
+				m := dm.GetField(fd).(map[interface{}]interface{})
+				for k, v := range m {
+					// keys can't be nested messages; so we only need to recurse through map values, not keys
+					newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
+					if err != nil {
+						return nil, err
+					}
+					dm.PutMapField(fd, k, newVal)
+				}
+			}
+		} else if fd.IsRepeated() {
+			if fd.GetMessageType() != nil {
+				s := dm.GetField(fd).([]interface{})
+				for i, e := range s {
+					newVal, err := fullyConvertToDynamic(msgFact, e.(proto.Message))
+					if err != nil {
+						return nil, err
+					}
+					dm.SetRepeatedField(fd, i, newVal)
+				}
+			}
+		} else {
+			if fd.GetMessageType() != nil {
+				v := dm.GetField(fd)
+				newVal, err := fullyConvertToDynamic(msgFact, v.(proto.Message))
+				if err != nil {
+					return nil, err
+				}
+				dm.SetField(fd, newVal)
+			}
+		}
+	}
+	return dm, nil
+}
+
+// MakeTemplate returns a message instance for the given descriptor that is a
+// suitable template for creating an instance of that message in JSON. In
+// particular, it ensures that any repeated fields (which include map fields)
+// are not empty, so they will render with a single element (to show the types
+// and optionally nested fields). It also ensures that nested messages are not
+// nil by setting them to a message that is also fleshed out as a template
+// message.
+func MakeTemplate(md *desc.MessageDescriptor) proto.Message {
+	return makeTemplate(md, nil)
+}
+
+func makeTemplate(md *desc.MessageDescriptor, path []*desc.MessageDescriptor) proto.Message {
+	switch md.GetFullyQualifiedName() {
+	case "google.protobuf.Any":
+		// empty type URL is not allowed by JSON representation
+		// so we must give it a dummy type
+		msg, _ := ptypes.MarshalAny(&empty.Empty{})
+		return msg
+	case "google.protobuf.Value":
+		// unset kind is not allowed by JSON representation
+		// so we must give it something
+		return &structpb.Value{
+			Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+				Fields: map[string]*structpb.Value{
+					"google.protobuf.Value": {Kind: &structpb.Value_StringValue{
+						StringValue: "supports arbitrary JSON",
+					}},
+				},
+			}},
+		}
+	case "google.protobuf.ListValue":
+		return &structpb.ListValue{
+			Values: []*structpb.Value{
+				{
+					Kind: &structpb.Value_StructValue{StructValue: &structpb.Struct{
+						Fields: map[string]*structpb.Value{
+							"google.protobuf.ListValue": {Kind: &structpb.Value_StringValue{
+								StringValue: "is an array of arbitrary JSON values",
+							}},
+						},
+					}},
+				},
+			},
+		}
+	case "google.protobuf.Struct":
+		return &structpb.Struct{
+			Fields: map[string]*structpb.Value{
+				"google.protobuf.Struct": {Kind: &structpb.Value_StringValue{
+					StringValue: "supports arbitrary JSON objects",
+				}},
+			},
+		}
+	}
+
+	dm := dynamic.NewMessage(md)
+
+	// if the message is a recursive structure, we don't want to blow the stack
+	for _, seen := range path {
+		if seen == md {
+			// already visited this type; avoid infinite recursion
+			return dm
+		}
+	}
+	path = append(path, dm.GetMessageDescriptor())
+
+	// for repeated fields, add a single element with default value
+	// and for message fields, add a message with all default fields
+	// that also has non-nil message and non-empty repeated fields
+
+	for _, fd := range dm.GetMessageDescriptor().GetFields() {
+		if fd.IsRepeated() {
+			switch fd.GetType() {
+			case descpb.FieldDescriptorProto_TYPE_FIXED32,
+				descpb.FieldDescriptorProto_TYPE_UINT32:
+				dm.AddRepeatedField(fd, uint32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_SFIXED32,
+				descpb.FieldDescriptorProto_TYPE_SINT32,
+				descpb.FieldDescriptorProto_TYPE_INT32,
+				descpb.FieldDescriptorProto_TYPE_ENUM:
+				dm.AddRepeatedField(fd, int32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_FIXED64,
+				descpb.FieldDescriptorProto_TYPE_UINT64:
+				dm.AddRepeatedField(fd, uint64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_SFIXED64,
+				descpb.FieldDescriptorProto_TYPE_SINT64,
+				descpb.FieldDescriptorProto_TYPE_INT64:
+				dm.AddRepeatedField(fd, int64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_STRING:
+				dm.AddRepeatedField(fd, "")
+
+			case descpb.FieldDescriptorProto_TYPE_BYTES:
+				dm.AddRepeatedField(fd, []byte{})
+
+			case descpb.FieldDescriptorProto_TYPE_BOOL:
+				dm.AddRepeatedField(fd, false)
+
+			case descpb.FieldDescriptorProto_TYPE_FLOAT:
+				dm.AddRepeatedField(fd, float32(0))
+
+			case descpb.FieldDescriptorProto_TYPE_DOUBLE:
+				dm.AddRepeatedField(fd, float64(0))
+
+			case descpb.FieldDescriptorProto_TYPE_MESSAGE,
+				descpb.FieldDescriptorProto_TYPE_GROUP:
+				dm.AddRepeatedField(fd, makeTemplate(fd.GetMessageType(), path))
+			}
+		} else if fd.GetMessageType() != nil {
+			dm.SetField(fd, makeTemplate(fd.GetMessageType(), path))
+		}
+	}
+	return dm
+}
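
A sketch of MakeTemplate in use, rendering the template as a JSON skeleton. It assumes jsonpb can marshal the returned dynamic message (protoreflect's dynamic.Message supports jsonpb marshaling); FileDescriptorProto is used only because it is a conveniently available message type with repeated and nested fields.

```go
package main

import (
	"fmt"
	"log"

	"github.com/fullstorydev/grpcurl"
	"github.com/golang/protobuf/jsonpb"
	descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
	"github.com/jhump/protoreflect/desc"
)

func main() {
	// Any registered message type will do here.
	md, err := desc.LoadMessageDescriptorForMessage(&descpb.FileDescriptorProto{})
	if err != nil {
		log.Fatal(err)
	}

	// The template has one element in each repeated field and non-nil nested
	// messages, so marshaling it yields a JSON skeleton of the type.
	tmpl := grpcurl.MakeTemplate(md)
	m := jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}
	out, err := m.MarshalToString(tmpl)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}
```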
+
+// ClientTransportCredentials builds transport credentials for a gRPC client using the
+// given properties. If cacertFile is blank, only standard trusted certs are used to
+// verify the server certs. If clientCertFile is blank, the client will not use a client
+// certificate. If clientCertFile is not blank then clientKeyFile must not be blank.
+func ClientTransportCredentials(insecureSkipVerify bool, cacertFile, clientCertFile, clientKeyFile string) (credentials.TransportCredentials, error) {
+	var tlsConf tls.Config
+
+	if clientCertFile != "" {
+		// Load the client certificates from disk
+		certificate, err := tls.LoadX509KeyPair(clientCertFile, clientKeyFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not load client key pair: %v", err)
+		}
+		tlsConf.Certificates = []tls.Certificate{certificate}
+	}
+
+	if insecureSkipVerify {
+		tlsConf.InsecureSkipVerify = true
+	} else if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := ioutil.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.RootCAs = certPool
+	}
+
+	return credentials.NewTLS(&tlsConf), nil
+}
+
+// ServerTransportCredentials builds transport credentials for a gRPC server using the
+// given properties. If cacertFile is blank, the server will not request client certs
+// unless requireClientCerts is true. When requireClientCerts is false and cacertFile is
+// not blank, the server will verify client certs when presented, but will not require
+// client certs. The serverCertFile and serverKeyFile must both not be blank.
+func ServerTransportCredentials(cacertFile, serverCertFile, serverKeyFile string, requireClientCerts bool) (credentials.TransportCredentials, error) {
+	var tlsConf tls.Config
+	// TODO(jh): Remove this line once https://github.com/golang/go/issues/28779 is fixed
+	// in Go tip. Until then, the recently merged TLS 1.3 support breaks the TLS tests.
+	tlsConf.MaxVersion = tls.VersionTLS12
+
+	// Load the server certificates from disk
+	certificate, err := tls.LoadX509KeyPair(serverCertFile, serverKeyFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not load key pair: %v", err)
+	}
+	tlsConf.Certificates = []tls.Certificate{certificate}
+
+	if cacertFile != "" {
+		// Create a certificate pool from the certificate authority
+		certPool := x509.NewCertPool()
+		ca, err := ioutil.ReadFile(cacertFile)
+		if err != nil {
+			return nil, fmt.Errorf("could not read ca certificate: %v", err)
+		}
+
+		// Append the certificates from the CA
+		if ok := certPool.AppendCertsFromPEM(ca); !ok {
+			return nil, errors.New("failed to append ca certs")
+		}
+
+		tlsConf.ClientCAs = certPool
+	}
+
+	if requireClientCerts {
+		tlsConf.ClientAuth = tls.RequireAndVerifyClientCert
+	} else if cacertFile != "" {
+		tlsConf.ClientAuth = tls.VerifyClientCertIfGiven
+	} else {
+		tlsConf.ClientAuth = tls.NoClientCert
+	}
+
+	return credentials.NewTLS(&tlsConf), nil
+}
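
A server-side sketch using the credentials returned above, with hypothetical certificate file names; grpc.Creds wraps the transport credentials into a server option.

```go
package main

import (
	"log"
	"net"

	"github.com/fullstorydev/grpcurl"
	"google.golang.org/grpc"
)

func main() {
	// Hypothetical file names. With requireClientCerts=true the server will
	// demand and verify client certificates against ca.crt.
	creds, err := grpcurl.ServerTransportCredentials("ca.crt", "server.crt", "server.key", true)
	if err != nil {
		log.Fatal(err)
	}

	lis, err := net.Listen("tcp", ":8443")
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer(grpc.Creds(creds))
	// ... register services on srv here ...
	log.Fatal(srv.Serve(lis))
}
```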
+
+// BlockingDial is a helper method to dial the given address, using optional TLS credentials,
+// and blocking until the returned connection is ready. If the given credentials are nil, the
+// connection will be insecure (plain-text).
+func BlockingDial(ctx context.Context, network, address string, creds credentials.TransportCredentials, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
+	// grpc.Dial doesn't provide any information on permanent connection errors (like
+	// TLS handshake failures). So in order to provide good error messages, we need a
+	// custom dialer that can provide that info. That means we manage the TLS handshake.
+	result := make(chan interface{}, 1)
+
+	writeResult := func(res interface{}) {
+		// non-blocking write: we only need the first result
+		select {
+		case result <- res:
+		default:
+		}
+	}
+
+	dialer := func(ctx context.Context, address string) (net.Conn, error) {
+		conn, err := (&net.Dialer{}).DialContext(ctx, network, address)
+		if err != nil {
+			writeResult(err)
+			return nil, err
+		}
+		if creds != nil {
+			conn, _, err = creds.ClientHandshake(ctx, address, conn)
+			if err != nil {
+				writeResult(err)
+				return nil, err
+			}
+		}
+		return conn, nil
+	}
+
+	// Even with grpc.FailOnNonTempDialError, this call will usually time out in
+	// the face of TLS handshake errors. So we can't rely on grpc.WithBlock() to
+	// know when we're done. Instead, we run it in a goroutine and then use the
+	// result channel to either get the connection or fail fast.
+	go func() {
+		opts = append(opts,
+			grpc.WithBlock(),
+			grpc.FailOnNonTempDialError(true),
+			grpc.WithContextDialer(dialer),
+			grpc.WithInsecure(), // we are handling TLS, so tell grpc not to
+		)
+		conn, err := grpc.DialContext(ctx, address, opts...)
+		var res interface{}
+		if err != nil {
+			res = err
+		} else {
+			res = conn
+		}
+		writeResult(res)
+	}()
+
+	select {
+	case res := <-result:
+		if conn, ok := res.(*grpc.ClientConn); ok {
+			return conn, nil
+		}
+		return nil, res.(error)
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}
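
A client-side sketch combining ClientTransportCredentials and BlockingDial, again with hypothetical file names and address.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/fullstorydev/grpcurl"
)

func main() {
	// Hypothetical CA and client cert/key paths; pass nil creds to
	// BlockingDial instead for a plain-text connection.
	creds, err := grpcurl.ClientTransportCredentials(false, "ca.crt", "client.crt", "client.key")
	if err != nil {
		log.Fatal(err)
	}

	// BlockingDial surfaces TLS handshake failures instead of hanging until
	// the context times out.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cc, err := grpcurl.BlockingDial(ctx, "tcp", "localhost:8443", creds)
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()
	// cc is a ready *grpc.ClientConn, suitable for use with InvokeRPC below.
}
```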
diff --git a/vendor/github.com/fullstorydev/grpcurl/invoke.go b/vendor/github.com/fullstorydev/grpcurl/invoke.go
new file mode 100644
index 0000000..d2f16cb
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/invoke.go
@@ -0,0 +1,389 @@
+package grpcurl
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/dynamic"
+	"github.com/jhump/protoreflect/dynamic/grpcdynamic"
+	"github.com/jhump/protoreflect/grpcreflect"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// InvocationEventHandler is a bag of callbacks for handling events that occur in the course
+// of invoking an RPC. The handler also provides request data that is sent. The callbacks are
+// generally called in the order they are listed below.
+type InvocationEventHandler interface {
+	// OnResolveMethod is called with a descriptor of the method that is being invoked.
+	OnResolveMethod(*desc.MethodDescriptor)
+	// OnSendHeaders is called with the request metadata that is being sent.
+	OnSendHeaders(metadata.MD)
+	// OnReceiveHeaders is called when response headers have been received.
+	OnReceiveHeaders(metadata.MD)
+	// OnReceiveResponse is called for each response message received.
+	OnReceiveResponse(proto.Message)
+	// OnReceiveTrailers is called when response trailers and final RPC status have been received.
+	OnReceiveTrailers(*status.Status, metadata.MD)
+}
+
+// RequestMessageSupplier is a function that is called to retrieve request
+// messages for a gRPC operation. This type is deprecated and will be removed in
+// a future release.
+//
+// Deprecated: This is only used with the deprecated InvokeRpc. Instead, use
+// RequestSupplier with InvokeRPC.
+type RequestMessageSupplier func() ([]byte, error)
+
+// InvokeRpc uses the given gRPC connection to invoke the given method. This function is deprecated
+// and will be removed in a future release. It just delegates to the similarly named InvokeRPC
+// method, whose signature is only slightly different.
+//
+// Deprecated: use InvokeRPC instead.
+func InvokeRpc(ctx context.Context, source DescriptorSource, cc *grpc.ClientConn, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestMessageSupplier) error {
+
+	return InvokeRPC(ctx, source, cc, methodName, headers, handler, func(m proto.Message) error {
+		// New function is almost identical, but the request supplier function works differently.
+		// So we adapt the logic here to maintain compatibility.
+		data, err := requestData()
+		if err != nil {
+			return err
+		}
+		return jsonpb.Unmarshal(bytes.NewReader(data), m)
+	})
+}
+
+// RequestSupplier is a function that is called to populate messages for a gRPC operation. The
+// function should populate the given message or return a non-nil error. If the supplier has no
+// more messages, it should return io.EOF. When it returns io.EOF, it should not in any way
+// modify the given message argument.
+type RequestSupplier func(proto.Message) error
+
+// InvokeRPC uses the given gRPC channel to invoke the given method. The given descriptor source
+// is used to determine the type of method and the type of request and response message. The given
+// headers are sent as request metadata. Methods on the given event handler are called as the
+// invocation proceeds.
+//
+// The given requestData function supplies the actual data to send. It should return io.EOF when
+// there is no more request data. If the method being invoked is a unary or server-streaming RPC
+// (i.e. exactly one request message) and there is no request data (i.e. the first invocation of
+// the function returns io.EOF), then an empty request message is sent.
+//
+// If the requestData function and the given event handler coordinate or share any state, they should
+// be thread-safe. This is because the requestData function may be called from a different goroutine
+// than the one invoking event callbacks. (This only happens for bi-directional streaming RPCs, where
+// one goroutine sends request messages and another consumes the response messages).
+func InvokeRPC(ctx context.Context, source DescriptorSource, ch grpcdynamic.Channel, methodName string,
+	headers []string, handler InvocationEventHandler, requestData RequestSupplier) error {
+
+	md := MetadataFromHeaders(headers)
+
+	svc, mth := parseSymbol(methodName)
+	if svc == "" || mth == "" {
+		return fmt.Errorf("given method name %q is not in expected format: 'service/method' or 'service.method'", methodName)
+	}
+	dsc, err := source.FindSymbol(svc)
+	if err != nil {
+		if isNotFoundError(err) {
+			return fmt.Errorf("target server does not expose service %q", svc)
+		}
+		return fmt.Errorf("failed to query for service descriptor %q: %v", svc, err)
+	}
+	sd, ok := dsc.(*desc.ServiceDescriptor)
+	if !ok {
+		return fmt.Errorf("target server does not expose service %q", svc)
+	}
+	mtd := sd.FindMethodByName(mth)
+	if mtd == nil {
+		return fmt.Errorf("service %q does not include a method named %q", svc, mth)
+	}
+
+	handler.OnResolveMethod(mtd)
+
+	// we also download any applicable extensions so we can provide full support for parsing user-provided data
+	var ext dynamic.ExtensionRegistry
+	alreadyFetched := map[string]bool{}
+	if err = fetchAllExtensions(source, &ext, mtd.GetInputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetInputType().GetFullyQualifiedName(), err)
+	}
+	if err = fetchAllExtensions(source, &ext, mtd.GetOutputType(), alreadyFetched); err != nil {
+		return fmt.Errorf("error resolving server extensions for message %s: %v", mtd.GetOutputType().GetFullyQualifiedName(), err)
+	}
+
+	msgFactory := dynamic.NewMessageFactoryWithExtensionRegistry(&ext)
+	req := msgFactory.NewMessage(mtd.GetInputType())
+
+	handler.OnSendHeaders(md)
+	ctx = metadata.NewOutgoingContext(ctx, md)
+
+	stub := grpcdynamic.NewStubWithMessageFactory(ch, msgFactory)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	if mtd.IsClientStreaming() && mtd.IsServerStreaming() {
+		return invokeBidi(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsClientStreaming() {
+		return invokeClientStream(ctx, stub, mtd, handler, requestData, req)
+	} else if mtd.IsServerStreaming() {
+		return invokeServerStream(ctx, stub, mtd, handler, requestData, req)
+	} else {
+		return invokeUnary(ctx, stub, mtd, handler, requestData, req)
+	}
+}
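
A minimal end-to-end sketch of InvokeRPC driving the DefaultEventHandler defined earlier. The reflection-based DescriptorSourceFromServer constructor is assumed from elsewhere in this package (desc_source.go), and the address and method name are placeholders.

```go
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/fullstorydev/grpcurl"
	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/proto"
	"github.com/jhump/protoreflect/grpcreflect"
	"google.golang.org/grpc"
	reflectpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
)

func main() {
	ctx := context.Background()

	// Plain-text connection; see BlockingDial above for TLS.
	cc, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer cc.Close()

	// Describe the server via its reflection service (DescriptorSourceFromServer
	// is assumed to come from this package's descriptor-source code).
	refClient := grpcreflect.NewClient(ctx, reflectpb.NewServerReflectionClient(cc))
	source := grpcurl.DescriptorSourceFromServer(ctx, refClient)

	// Supply exactly one request message (an empty JSON object), then io.EOF.
	sent := false
	requestData := func(msg proto.Message) error {
		if sent {
			return io.EOF
		}
		sent = true
		return jsonpb.UnmarshalString(`{}`, msg)
	}

	// Log responses as indented JSON via the DefaultEventHandler above.
	marshaler := jsonpb.Marshaler{Indent: "  "}
	formatter := func(m proto.Message) (string, error) { return marshaler.MarshalToString(m) }
	handler := grpcurl.NewDefaultEventHandler(os.Stdout, source, formatter, false)

	// Fully-qualified "Service/Method" (or "Service.Method") name; placeholder here.
	if err := grpcurl.InvokeRPC(ctx, source, cc, "my.pkg.MyService/MyMethod", nil, handler, requestData); err != nil {
		log.Fatal(err)
	}
	grpcurl.PrintStatus(os.Stdout, handler.Status, formatter)
}
```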
+
+func invokeUnary(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a unary RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	var respHeaders metadata.MD
+	var respTrailers metadata.MD
+	resp, err := stub.InvokeRpc(ctx, md, req, grpc.Trailer(&respTrailers), grpc.Header(&respHeaders))
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveHeaders(respHeaders)
+
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	handler.OnReceiveTrailers(stat, respTrailers)
+
+	return nil
+}
+
+func invokeClientStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcClientStream(ctx, md)
+
+	// Upload each request message in the stream
+	var resp proto.Message
+	for err == nil {
+		err = requestData(req)
+		if err == io.EOF {
+			resp, err = str.CloseAndReceive()
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+
+		err = str.SendMsg(req)
+		if err == io.EOF {
+			// We get EOF on send if the server says "go away"
+			// We have to use CloseAndReceive to get the actual code
+			resp, err = str.CloseAndReceive()
+			break
+		}
+
+		req.Reset()
+	}
+
+	// finally, process response data
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	if stat.Code() == codes.OK {
+		handler.OnReceiveResponse(resp)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+func invokeServerStream(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	err := requestData(req)
+	if err != nil && err != io.EOF {
+		return fmt.Errorf("error getting request data: %v", err)
+	}
+	if err != io.EOF {
+		// verify there is no second message, which is a usage error
+		err := requestData(req)
+		if err == nil {
+			return fmt.Errorf("method %q is a server-streaming RPC, but request data contained more than 1 message", md.GetFullyQualifiedName())
+		} else if err != io.EOF {
+			return fmt.Errorf("error getting request data: %v", err)
+		}
+	}
+
+	// Now we can actually invoke the RPC!
+	str, err := stub.InvokeRpcServerStream(ctx, md, req)
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+func invokeBidi(ctx context.Context, stub grpcdynamic.Stub, md *desc.MethodDescriptor, handler InvocationEventHandler,
+	requestData RequestSupplier, req proto.Message) error {
+
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	// invoke the RPC!
+	str, err := stub.InvokeRpcBidiStream(ctx, md)
+
+	var wg sync.WaitGroup
+	var sendErr atomic.Value
+
+	defer wg.Wait()
+
+	if err == nil {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			// Concurrently upload each request message in the stream
+			var err error
+			for err == nil {
+				err = requestData(req)
+
+				if err == io.EOF {
+					err = str.CloseSend()
+					break
+				}
+				if err != nil {
+					err = fmt.Errorf("error getting request data: %v", err)
+					cancel()
+					break
+				}
+
+				err = str.SendMsg(req)
+
+				req.Reset()
+			}
+
+			if err != nil {
+				sendErr.Store(err)
+			}
+		}()
+	}
+
+	if respHeaders, err := str.Header(); err == nil {
+		handler.OnReceiveHeaders(respHeaders)
+	}
+
+	// Download each response message
+	for err == nil {
+		var resp proto.Message
+		resp, err = str.RecvMsg()
+		if err != nil {
+			if err == io.EOF {
+				err = nil
+			}
+			break
+		}
+		handler.OnReceiveResponse(resp)
+	}
+
+	if se, ok := sendErr.Load().(error); ok && se != io.EOF {
+		err = se
+	}
+
+	stat, ok := status.FromError(err)
+	if !ok {
+		// Error codes sent from the server will get printed differently below.
+		// So just bail for other kinds of errors here.
+		return fmt.Errorf("grpc call for %q failed: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	handler.OnReceiveTrailers(stat, str.Trailer())
+
+	return nil
+}
+
+type notFoundError string
+
+func notFound(kind, name string) error {
+	return notFoundError(fmt.Sprintf("%s not found: %s", kind, name))
+}
+
+func (e notFoundError) Error() string {
+	return string(e)
+}
+
+func isNotFoundError(err error) bool {
+	if grpcreflect.IsElementNotFoundError(err) {
+		return true
+	}
+	_, ok := err.(notFoundError)
+	return ok
+}
+
+func parseSymbol(svcAndMethod string) (string, string) {
+	pos := strings.LastIndex(svcAndMethod, "/")
+	if pos < 0 {
+		pos = strings.LastIndex(svcAndMethod, ".")
+		if pos < 0 {
+			return "", ""
+		}
+	}
+	return svcAndMethod[:pos], svcAndMethod[pos+1:]
+}
diff --git a/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
new file mode 100755
index 0000000..407f7dc
--- /dev/null
+++ b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+set -e
+
+cd "$(dirname $0)"
+
+# Run this script to generate files used by tests.
+
+echo "Creating protosets..."
+protoc testing/test.proto \
+	--include_imports \
+	--descriptor_set_out=testing/test.protoset
+
+protoc testing/example.proto \
+	--include_imports \
+	--descriptor_set_out=testing/example.protoset
+
+echo "Creating certs for TLS testing..."
+if ! hash certstrap 2>/dev/null; then
+  # certstrap not found: try to install it
+  go get github.com/square/certstrap
+  go install github.com/square/certstrap
+fi
+
+function cs() {
+	certstrap --depot-path testing/tls "$@" --passphrase ""
+}
+
+rm -rf testing/tls
+
+# Create CA
+cs init --years 10 --common-name ca
+
+# Create client cert
+cs request-cert --common-name client
+cs sign client --years 10 --CA ca
+
+# Create server cert
+cs request-cert --common-name server --ip 127.0.0.1 --domain localhost
+cs sign server --years 10 --CA ca
+
+# Create another server cert for error testing
+cs request-cert --common-name other --ip 1.2.3.4 --domain foobar.com
+cs sign other --years 10 --CA ca
+
+# Create another CA and client cert for more
+# error testing
+cs init --years 10 --common-name wrong-ca
+cs request-cert --common-name wrong-client
+cs sign wrong-client --years 10 --CA wrong-ca
+
+# Create expired cert
+cs request-cert --common-name expired --ip 127.0.0.1 --domain localhost
+cs sign expired --years 0 --CA ca
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 0000000..e256a31
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 0000000..0e9d6ed
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+script:
+  - go test
+  - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 0000000..7805d36
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 0000000..0200f75
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+	exampleKey: !!binary gIGC
+
+GOOD:
+	exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since this is not supported by JSON. The same error occurs in `Unmarshal`, because such keys cannot be unmarshaled into struct fields anyway.
+
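+A rough illustration of that second caveat (hypothetical snippet):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	// A YAML mapping whose key is itself a mapping has no JSON equivalent.
+	y := []byte("? {name: John}\n: person\n")
+	_, err := yaml.YAMLToJSON(y)
+	fmt.Printf("err: %v\n", err) // expect a conversion error here
+}
+```
+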
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+type Person struct {
+	Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+	// Marshal a Person struct to YAML.
+	p := Person{"John", 30}
+	y, err := yaml.Marshal(p)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	age: 30
+	name: John
+	*/
+
+	// Unmarshal the YAML back into a Person struct.
+	var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(p2)
+	/* Output:
+	{John 30}
+	*/
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	j := []byte(`{"name": "John", "age": 30}`)
+	y, err := yaml.JSONToYAML(j)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(y))
+	/* Output:
+	name: John
+	age: 30
+	*/
+	j2, err := yaml.YAMLToJSON(y)
+	if err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println(string(j2))
+	/* Output:
+	{"age":30,"name":"John"}
+	*/
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 0000000..5860074
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/json"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			if v.CanSet() {
+				v.Set(reflect.New(v.Type().Elem()))
+			} else {
+				v = reflect.New(v.Type().Elem())
+			}
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(json.Unmarshaler); ok {
+				return u, nil, reflect.Value{}
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, reflect.Value{}
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular optionName flag. optionName must be
+// surrounded by a string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
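
The tag helpers above (parseTag, tagOptions.Contains) are unexported, so the following sketch only illustrates how they behave when called from inside this package; the Person type and the expected values in the comments are assumptions made for illustration, not part of the vendored code.

    // In-package sketch: parseTag and tagOptions.Contains are unexported, so
    // this would have to live inside the yaml package. Person is hypothetical.
    type Person struct {
    	Name string `json:"name,omitempty"`
    }

    func exampleTagParsing() {
    	f, _ := reflect.TypeOf(Person{}).FieldByName("Name")
    	name, opts := parseTag(f.Tag.Get("json")) // name == "name", opts == "omitempty"
    	_ = name
    	_ = opts.Contains("omitempty") // true
    	_ = opts.Contains("string")    // false
    }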
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 0000000..4fb4054
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strconv"
+
+	"gopkg.in/yaml.v2"
+)
+
+// Marshal marshals the object into JSON, then converts the JSON to YAML and
+// returns the YAML.
+func Marshal(o interface{}) ([]byte, error) {
+	j, err := json.Marshal(o)
+	if err != nil {
+		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+	}
+
+	y, err := JSONToYAML(j)
+	if err != nil {
+		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+	}
+
+	return y, nil
+}
+
+// Unmarshal converts YAML to JSON, then uses JSON to unmarshal into the object.
+func Unmarshal(y []byte, o interface{}) error {
+	vo := reflect.ValueOf(o)
+	j, err := yamlToJSON(y, &vo)
+	if err != nil {
+		return fmt.Errorf("error converting YAML to JSON: %v", err)
+	}
+
+	err = json.Unmarshal(j, o)
+	if err != nil {
+		return fmt.Errorf("error unmarshaling JSON: %v", err)
+	}
+
+	return nil
+}
+
+// JSONToYAML converts JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+	// Convert the JSON to an object.
+	var jsonObj interface{}
+	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+	// Go JSON library doesn't try to pick the right number type (int, float,
+	// etc.) when unmarshalling to interface{}, it just picks float64
+	// universally. go-yaml does go through the effort of picking the right
+	// number type, so we can preserve number type throughout this process.
+	err := yaml.Unmarshal(j, &jsonObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// Marshal this object into YAML.
+	return yaml.Marshal(jsonObj)
+}
+
+// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, passing
+// JSON through this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+//   in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+//   use binary data with this library, encode the data as base64 as usual but do
+//   not use the !!binary tag in your YAML. This will ensure the original base64
+//   encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+	return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+	// Convert the YAML to an object.
+	var yamlObj interface{}
+	err := yaml.Unmarshal(y, &yamlObj)
+	if err != nil {
+		return nil, err
+	}
+
+	// YAML objects are not completely compatible with JSON objects (e.g. you
+	// can have non-string keys in YAML). So, convert the YAML-compatible object
+	// to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert this object to JSON and return the data.
+	return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+	var err error
+
+	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+	// interface). We pass decodingNull as false because we're not actually
+	// decoding into the value, we're just checking if the ultimate target is a
+	// string.
+	if jsonTarget != nil {
+		ju, tu, pv := indirect(*jsonTarget, false)
+		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+		// to decode into a string.
+		if ju != nil || tu != nil {
+			jsonTarget = nil
+		} else {
+			jsonTarget = &pv
+		}
+	}
+
+	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
+	// if so, coerce.  Else return normal.
+	// If yamlObj is a map or array, find the field that each key is
+	// unmarshaling to, and when you recurse pass the reflect.Value for that
+	// field back into this function.
+	switch typedYAMLObj := yamlObj.(type) {
+	case map[interface{}]interface{}:
+		// JSON does not support arbitrary keys in a map, so we must convert
+		// these keys to strings.
+		//
+		// From my reading of go-yaml v2 (specifically the resolve function),
+		// keys can only have the types string, int, int64, float64, binary
+		// (unsupported), or null (unsupported).
+		strMap := make(map[string]interface{})
+		for k, v := range typedYAMLObj {
+			// Resolve the key to a string first.
+			var keyString string
+			switch typedKey := k.(type) {
+			case string:
+				keyString = typedKey
+			case int:
+				keyString = strconv.Itoa(typedKey)
+			case int64:
+				// go-yaml will only return an int64 as a key if the system
+				// architecture is 32-bit and the key's value does not fit in an
+				// int32 but does fit in an int64. Otherwise the key type will simply be int.
+				keyString = strconv.FormatInt(typedKey, 10)
+			case float64:
+				// Borrowed from go-yaml: use the same float-to-string conversion
+				// that go-yaml itself uses when marshaling float keys.
+				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+				switch s {
+				case "+Inf":
+					s = ".inf"
+				case "-Inf":
+					s = "-.inf"
+				case "NaN":
+					s = ".nan"
+				}
+				keyString = s
+			case bool:
+				if typedKey {
+					keyString = "true"
+				} else {
+					keyString = "false"
+				}
+			default:
+				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+					reflect.TypeOf(k), k, v)
+			}
+
+			// jsonTarget should be a struct or a map. If it's a struct, find
+			// the field it's going to map to and pass its reflect.Value. If
+			// it's a map, find the element type of the map and pass the
+			// reflect.Value created from that type. If it's neither, just pass
+			// nil - JSON conversion will error for us if it's a real issue.
+			if jsonTarget != nil {
+				t := *jsonTarget
+				if t.Kind() == reflect.Struct {
+					keyBytes := []byte(keyString)
+					// Find the field that the JSON library would use.
+					var f *field
+					fields := cachedTypeFields(t.Type())
+					for i := range fields {
+						ff := &fields[i]
+						if bytes.Equal(ff.nameBytes, keyBytes) {
+							f = ff
+							break
+						}
+						// Do case-insensitive comparison.
+						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+							f = ff
+						}
+					}
+					if f != nil {
+						// Find the reflect.Value of the most preferential
+						// struct field.
+						jtf := t.Field(f.index[0])
+						strMap[keyString], err = convertToJSONableObject(v, &jtf)
+						if err != nil {
+							return nil, err
+						}
+						continue
+					}
+				} else if t.Kind() == reflect.Map {
+					// Create a zero value of the map's element type to use as
+					// the JSON target.
+					jtv := reflect.Zero(t.Type().Elem())
+					strMap[keyString], err = convertToJSONableObject(v, &jtv)
+					if err != nil {
+						return nil, err
+					}
+					continue
+				}
+			}
+			strMap[keyString], err = convertToJSONableObject(v, nil)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return strMap, nil
+	case []interface{}:
+		// We need to recurse into arrays in case there are any
+		// map[interface{}]interface{}'s inside and to convert any
+		// numbers to strings.
+
+		// If jsonTarget is a slice (which it really should be), find the
+		// thing it's going to map to. If it's not a slice, just pass nil
+		// - JSON conversion will error for us if it's a real issue.
+		var jsonSliceElemValue *reflect.Value
+		if jsonTarget != nil {
+			t := *jsonTarget
+			if t.Kind() == reflect.Slice {
+				// By default slices point to nil, but we need a reflect.Value
+				// pointing to a value of the slice type, so we create one here.
+				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+				jsonSliceElemValue = &ev
+			}
+		}
+
+		// Make and use a new array.
+		arr := make([]interface{}, len(typedYAMLObj))
+		for i, v := range typedYAMLObj {
+			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+			if err != nil {
+				return nil, err
+			}
+		}
+		return arr, nil
+	default:
+		// If the target type is a string and the YAML type is a number,
+		// convert the YAML type to a string.
+		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+			// Based on my reading of go-yaml, it may return int, int64,
+			// float64, or uint64.
+			var s string
+			switch typedVal := typedYAMLObj.(type) {
+			case int:
+				s = strconv.FormatInt(int64(typedVal), 10)
+			case int64:
+				s = strconv.FormatInt(typedVal, 10)
+			case float64:
+				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+			case uint64:
+				s = strconv.FormatUint(typedVal, 10)
+			case bool:
+				if typedVal {
+					s = "true"
+				} else {
+					s = "false"
+				}
+			}
+			if len(s) > 0 {
+				yamlObj = interface{}(s)
+			}
+		}
+		return yamlObj, nil
+	}
+
+	return nil, nil
+}
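
A minimal usage sketch of the exported API defined above (Marshal, Unmarshal); the Person type and its field values are illustrative assumptions, not part of the vendored code.

    // Illustrative only; Person and its values are assumptions.
    package main

    import (
    	"fmt"

    	"github.com/ghodss/yaml"
    )

    type Person struct {
    	Name string `json:"name"`
    	Age  int    `json:"age,omitempty"`
    }

    func main() {
    	p := Person{Name: "John", Age: 30}

    	// Struct -> JSON -> YAML (map keys come out sorted by go-yaml).
    	y, err := yaml.Marshal(p)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Print(string(y))
    	// age: 30
    	// name: John

    	// YAML -> JSON -> struct; the json tags drive the field matching.
    	var out Person
    	if err := yaml.Unmarshal(y, &out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Name, out.Age) // John 30
    }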
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 0000000..3d97fc7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people.  For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+#     Organization's name
+#     Individual's name <submission email address>
+#     Individual's name <submission email address> <email2> <emailN>
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1b4f6c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov <anton.povarov@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Clayton Coleman <ccoleman@redhat.com>
+Denis Smirnov <denis.smirnov.91@gmail.com>
+DongYun Kang <ceram1000@gmail.com>
+Dwayne Schultz <dschultz@pivotal.io>
+Georg Apitz <gapitz@pivotal.io>
+Gustav Paul <gustav.paul@gmail.com>
+Johan Brandhorst <johan.brandhorst@gmail.com>
+John Shahid <jvshahid@gmail.com>
+John Tuley <john@tuley.org>
+Laurent <laurent@adyoulike.com>
+Patrick Lee <patrick@dropbox.com>
+Peter Edge <peter.edge@gmail.com>
+Roger Johansson <rogeralsing@gmail.com>
+Sam Nguyen <sam.nguyen@sendgrid.com>
+Sergio Arbeo <serabe@gmail.com>
+Stephen J Day <stephen.day@docker.com>
+Tamir Duberstein <tamird@gmail.com>
+Todd Eisenberger <teisenberger@dropbox.com>
+Tormod Erevik Lea <tormodlea@gmail.com>
+Vyacheslav Kim <kane@sendgrid.com>
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
new file mode 100644
index 0000000..b368efb
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
@@ -0,0 +1,5 @@
+The contributors to the Go protobuf repository:
+
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
\ No newline at end of file
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..f57de90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+Protocol Buffers for Go with Gadgets
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors.  All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..00d65f3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+	go install
+
+test: install generate-test-pbs
+	go test
+
+
+generate-test-pbs:
+	make install
+	make -C test_proto
+	make -C proto3_proto
+	make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..a26b046
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+		emOut := out.Addr().Interface().(extensionsBytes)
+		bIn := emIn.GetExtensions()
+		bOut := emOut.GetExtensions()
+		*bOut = append(*bOut, *bIn...)
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
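
A sketch of Clone and Merge in use. Real callers pass protoc-generated message types; the Example type below is a hand-written stand-in carrying the same protobuf struct tags, assumed only for illustration.

    // Example is a hand-written stand-in for a generated message (assumption).
    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    type Example struct {
    	Name string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    	Tags []string `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"`
    }

    func (m *Example) Reset()         { *m = Example{} }
    func (m *Example) String() string { return proto.CompactTextString(m) }
    func (*Example) ProtoMessage()    {}

    func main() {
    	src := &Example{Name: "src", Tags: []string{"a"}}

    	// Clone returns a deep copy: mutating the copy leaves src untouched.
    	dup := proto.Clone(src).(*Example)
    	dup.Tags[0] = "changed"
    	fmt.Println(src.Tags) // [a]

    	// Merge overwrites set scalar fields and appends repeated fields.
    	dst := &Example{Name: "dst", Tags: []string{"b"}}
    	proto.Merge(dst, src)
    	fmt.Println(dst.Name, dst.Tags) // src [b a]
    }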
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
new file mode 100644
index 0000000..2455248
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
@@ -0,0 +1,39 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "reflect"
+
+type custom interface {
+	Marshal() ([]byte, error)
+	Unmarshal(data []byte) error
+	Size() int
+}
+
+var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check whether we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
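
A small sketch of the low-level decode helpers above, using the standard varint example (300 encodes as 0xAC 0x02) and a zigzag-encoded sint64; NewBuffer is defined elsewhere in this proto package.

    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    func main() {
    	// 300 is encoded as 0xAC 0x02: low 7 bits first, high bit = "more bytes".
    	x, n := proto.DecodeVarint([]byte{0xAC, 0x02})
    	fmt.Println(x, n) // 300 2

    	// The Buffer form keeps an index, so successive reads walk the slice.
    	b := proto.NewBuffer([]byte{0xAC, 0x02, 0x01})
    	v1, _ := b.DecodeVarint() // 300
    	v2, _ := b.DecodeVarint() // 1
    	fmt.Println(v1, v2)

    	// DecodeZigzag64 undoes the sint64 mapping: wire value 3 means -2.
    	z := proto.NewBuffer([]byte{0x03})
    	s, _ := z.DecodeZigzag64()
    	fmt.Println(int64(s)) // -2
    }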
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
new file mode 100644
index 0000000..fe1bd7d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							discardInfo.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						discardInfo.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
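
A sketch of DiscardUnknown on a legacy (non-generated) message, which takes the discardLegacy path above; the legacyMsg type and its unknown-field bytes are assumptions made only for illustration.

    // legacyMsg is a hypothetical hand-written message with preserved unknown bytes.
    package main

    import (
    	"fmt"

    	"github.com/gogo/protobuf/proto"
    )

    type legacyMsg struct {
    	Name             string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
    	XXX_unrecognized []byte `json:"-"`
    }

    func (m *legacyMsg) Reset()         { *m = legacyMsg{} }
    func (m *legacyMsg) String() string { return "legacyMsg" }
    func (*legacyMsg) ProtoMessage()    {}

    func main() {
    	// Pretend an earlier Unmarshal preserved bytes for an unknown field 2.
    	m := &legacyMsg{Name: "x", XXX_unrecognized: []byte{0x10, 0x01}}
    	proto.DiscardUnknown(m)
    	fmt.Println(m.XXX_unrecognized == nil) // true
    }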
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
new file mode 100644
index 0000000..93464c9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration.go
@@ -0,0 +1,100 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
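+
+// An illustrative round trip, as a sketch only (duration and its helpers are
+// unexported, so this would have to live inside this package):
+//
+//	d := 90*time.Minute + 30*time.Nanosecond
+//	p := durationProto(d) // &duration{Seconds: 5400, Nanos: 30}
+//	back, err := durationFromProto(p)
+//	// err == nil && back == d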
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 0000000..e748e17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
+
+type duration struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *duration) Reset()       { *m = duration{} }
+func (*duration) ProtoMessage()  {}
+func (*duration) String() string { return "duration<string>" }
+
+func init() {
+	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
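+
+// As a concrete illustration of the format described above, each byte carries
+// seven payload bits (least significant group first) plus a continuation flag
+// in its high bit:
+//
+//	EncodeVarint(127) // []byte{0x7f} (fits in a single byte)
+//	EncodeVarint(300) // []byte{0xac, 0x02} (300 = 0b10_0101100)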
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
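+
+// For example, SizeVarint(127) == 1 and SizeVarint(300) == 2, matching the
+// single-byte and two-byte encodings shown above.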
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
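+
+// The zigzag mapping interleaves negative and non-negative values so that
+// small magnitudes stay small on the wire, for example:
+//
+//	value:   0  -1   1  -2   2
+//	encoded: 0   1   2   3   4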
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
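+
+// Both length-delimited encoders above emit a length varint followed by the
+// raw bytes; for example, EncodeStringBytes("hi") appends 0x02, 'h', 'i'.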
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// isNil reports whether v holds a nil value; only interface, map, pointer,
+// and slice kinds can be nil, every other kind reports false.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..0f5fb17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,33 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// NewRequiredNotSetError returns a RequiredNotSetError for the named field.
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+	return &RequiredNotSetError{field}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
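+
+// A minimal usage sketch; pb.Thing is a hypothetical generated message type,
+// shown only for orientation:
+//
+//	a := &pb.Thing{Name: proto.String("x")}
+//	b := &pb.Thing{Name: proto.String("x")}
+//	proto.Equal(a, b)   // true: same type and corresponding fields are equal
+//	proto.Equal(a, nil) // false: a nil message only equals another nil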
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// The case where both sides have only encoded form and the bytes
+			// are equal is handled above; we get here when the bytes differ or
+			// when only one side has been decoded. Without a descriptor we
+			// cannot decode for a semantic comparison, so report not equal.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..686bd2a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,604 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	case extensionsBytes:
+		return slowExtensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	if ebase, ok := base.(extensionsBytes); ok {
+		clearExtension(base, id)
+		ext := ebase.GetExtensions()
+		*ext = append(*ext, b...)
+		return
+	}
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if ea, ok := pbi.(slowExtensionAdapter); ok {
+		pbi = ea.extensionsBytes
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		buf := *ext
+		o := 0
+		for o < len(buf) {
+			tag, n := DecodeVarint(buf[o:])
+			fieldNum := int32(tag >> 3)
+			if int32(fieldNum) == extension.Field {
+				return true
+			}
+			wireType := int(tag & 0x7)
+			o += n
+			l, err := size(buf[o:], wireType)
+			if err != nil {
+				return false
+			}
+			o += l
+		}
+		return false
+	}
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+	if epb, ok := pb.(extensionsBytes); ok {
+		offset := 0
+		for offset != -1 {
+			offset = deleteExtension(epb, fieldNum, offset)
+		}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, fieldNum)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		return decodeExtensionFromBytes(extension, *ext)
+	}
+
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if cerr := checkExtensionTypes(epb, extension); cerr != nil {
+			return nil, cerr
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
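+
+// A typical call site looks roughly like this; pb.E_Ext and pb.Ext are
+// hypothetical generated names, used here only for illustration:
+//
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		v, err := proto.GetExtension(msg, pb.E_Ext)
+//		if err == nil {
+//			ext := v.(*pb.Ext) // asserted to the descriptor's ExtensionType
+//		}
+//	}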
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a pointer; we can return sf.value directly.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	if epb, ok := pb.(extensionsBytes); ok {
+		newb, err := encodeExtension(extension, value)
+		if err != nil {
+			return err
+		}
+		bb := epb.GetExtensions()
+		*bb = append(*bb, newb...)
+		return nil
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
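+
+// The mirror image for writing, again with hypothetical generated names:
+//
+//	err := proto.SetExtension(msg, pb.E_Ext, &pb.Ext{Value: proto.Int32(7)})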
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		*ext = []byte{}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..53ebd8c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,368 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+)
+
+type extensionsBytes interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	GetExtensions() *[]byte
+}
+
+type slowExtensionAdapter struct {
+	extensionsBytes
+}
+
+func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
+	panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
+}
+
+func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	b := s.GetExtensions()
+	m, err := BytesToExtensionsMap(*b)
+	if err != nil {
+		panic(err)
+	}
+	return m, notLocker{}
+}
+
+// GetBoolExtension returns the value of a bool extension on pb, or ifnotset
+// if the message is nil or the extension is missing, unreadable, or nil.
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+	if reflect.ValueOf(pb).IsNil() {
+		return ifnotset
+	}
+	value, err := GetExtension(pb, extension)
+	if err != nil {
+		return ifnotset
+	}
+	if value == nil {
+		return ifnotset
+	}
+	if value.(*bool) == nil {
+		return ifnotset
+	}
+	return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+	if err := this.Encode(); err != nil {
+		return false
+	}
+	if err := that.Encode(); err != nil {
+		return false
+	}
+	return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+	if err := this.Encode(); err != nil {
+		return 1
+	}
+	if err := that.Encode(); err != nil {
+		return -1
+	}
+	return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+	info := getMarshalInfo(reflect.TypeOf(m))
+	return info.sizeV1Extensions(m.extensionsWrite())
+}
+
+type sortableMapElem struct {
+	field int32
+	ext   Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+	s := make(sortableExtensions, 0, len(m))
+	for k, v := range m {
+		s = append(s, &sortableMapElem{field: k, ext: v})
+	}
+	return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+	sort.Sort(this)
+	ss := make([]string, len(this))
+	for i := range this {
+		ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+	}
+	return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+	return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+	return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+	m, err := BytesToExtensionsMap(ext)
+	if err != nil {
+		panic(err)
+	}
+	return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[o:], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		o += n
+	}
+	return o, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+	e := m[id]
+	if err := e.Encode(); err != nil {
+		return nil, err
+	}
+	return e.enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			s, err := size(buf[offset:], wire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+// BytesToExtensionsMap parses a raw extension byte slice into a map from
+// extension field number to its still-encoded Extension.
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	i := 0
+	for i < len(buf) {
+		tag, n := DecodeVarint(buf[i:])
+		if n <= 0 {
+			return nil, fmt.Errorf("unable to decode varint")
+		}
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		l, err := size(buf[i+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		end := i + int(l) + n
+		m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+		i = end
+	}
+	return m, nil
+}
+
+func NewExtension(e []byte) Extension {
+	ee := Extension{enc: make([]byte, len(e))}
+	copy(ee.enc, e)
+	return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
+		ext := ee.GetExtensions()
+		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		m[int32(tag)] = ext
+	}
+}
+
+func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
+	u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
+	ei := u.getExtElemInfo(extension)
+	v := value
+	p := toAddrPointer(&v, ei.isptr)
+	siz := ei.sizer(p, SizeVarint(ei.wiretag))
+	buf := make([]byte, 0, siz)
+	return ei.marshaler(buf, p, ei.wiretag, false)
+}
+
+func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
+	o := 0
+	for o < len(buf) {
+		tag, n := DecodeVarint((buf)[o:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		if o+n > len(buf) {
+			return nil, fmt.Errorf("unable to decode extension")
+		}
+		l, err := size((buf)[o+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		if int32(fieldNum) == extension.Field {
+			if o+n+l > len(buf) {
+				return nil, fmt.Errorf("unable to decode extension")
+			}
+			v, err := decodeExtension((buf)[o:o+n+l], extension)
+			if err != nil {
+				return nil, err
+			}
+			return v, nil
+		}
+		o += n + l
+	}
+	return defaultExtensionValue(extension)
+}
+
+func (this *Extension) Encode() error {
+	if this.enc == nil {
+		var err error
+		this.enc, err = encodeExtension(this.desc, this.value)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (this Extension) GoString() string {
+	if err := this.Encode(); err != nil {
+		return fmt.Sprintf("error encoding extension: %v", err)
+	}
+	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+// SetUnsafeExtension sets the extension identified by fieldNum on pb,
+// resolving the descriptor through the global extension registry.
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return SetExtension(pb, desc, value)
+}
+
+// GetUnsafeExtension retrieves the extension identified by fieldNum from pb,
+// resolving the descriptor through the global extension registry.
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+	}
+	return GetExtension(pb, desc)
+}
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+	x := &XXX_InternalExtensions{
+		p: new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		}),
+	}
+	x.p.extensionMap = m
+	return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+	pb := extendable.(extendableProto)
+	return pb.extensionsWrite()
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+	ext := pb.GetExtensions()
+	for offset < len(*ext) {
+		tag, n1 := DecodeVarint((*ext)[offset:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		n2, err := size((*ext)[offset+n1:], wireType)
+		if err != nil {
+			panic(err)
+		}
+		newOffset := offset + n1 + n2
+		if fieldNum == theFieldNum {
+			*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+			return offset
+		}
+		offset = newOffset
+	}
+	return -1
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..d17f802
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,967 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/gogo/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/gogo/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether err was absorbed: it returns
+// true if err is nil or non-fatal, and false for any fatal error.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
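+
+// A short in-package sketch (not part of the original source) of the intended
+// accumulation pattern: non-fatal errors are remembered but do not stop the
+// loop, while the first fatal error aborts it immediately.
+//
+//	var nf nonFatal
+//	for _, err := range errs { // errs is a hypothetical slice of errors
+//		if !nf.Merge(err) {
+//			return err // fatal: stop immediately
+//		}
+//	}
+//	return nf.E // nil, or the first non-fatal error that was seen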
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
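+
+// A minimal usage sketch (not part of the original source): request
+// deterministic output when marshaling through a Buffer. It assumes a
+// generated message value msg and the Buffer.Marshal method defined
+// elsewhere in this package.
+//
+//	var buf Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := buf.Bytes() // equal messages always produce identical bytes here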
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
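+
+// A tiny illustrative sketch (not part of the original source): the helpers
+// above make it convenient to populate optional scalar fields, which are
+// pointers in proto2-generated messages (msg is a hypothetical generated
+// message with Label and Type pointer fields).
+//
+//	msg.Label = String("hello") // instead of: s := "hello"; msg.Label = &s
+//	msg.Type = Int32(17)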
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given a map from enum values to names and a value, it returns the
+// name for that value, or its decimal representation if the name is unknown.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
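+
+// An illustrative sketch (not part of the original source), using the FOO_value
+// map from the generated example in the package documentation; both the
+// symbolic and the numeric JSON forms decode to the same value.
+//
+//	v1, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v1 == 17
+//	v2, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`17`), "FOO")  // v2 == 17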
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	sindex := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = sindex
+}
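+
+// A brief usage sketch (not part of the original source): dump freshly
+// marshaled bytes in the debugging format, one wire-format tag per line
+// (msg is any generated Message).
+//
+//	data, _ := Marshal(msg)
+//	new(Buffer).DebugPrint("after marshal", data)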
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
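+
+// A minimal sketch (not part of the original source), reusing the generated
+// Test message from the package documentation: SetDefaults materializes the
+// declared default for the unset Type field and leaves set fields alone.
+//
+//	test := &pb.Test{Label: proto.String("hello")}
+//	proto.SetDefaults(test)
+//	// test.Type now points at the declared default, 77; test.Label is unchanged.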
+
+// setDefaults sets defaults in v, which must be a struct or a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or T or []*T or []T
+		switch f.Kind() {
+		case reflect.Struct:
+			setDefaults(f, recur, zeros)
+
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.Kind() == reflect.Ptr && e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage, which
+	// records the scalar fields (with any proto-declared default values) and
+	// the indices of its nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Struct:
+		nestedMessage = true // non-nullable
+
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr, reflect.Struct:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
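+
+// A small in-package sketch (not part of the original source): sorting string
+// map keys before emitting map entries, as the deterministic marshaler does.
+//
+//	keys := []reflect.Value{reflect.ValueOf("b"), reflect.ValueOf("a")}
+//	sort.Sort(mapKeys(keys))
+//	// keys is now ordered "a", "b"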
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion2 = true
+
+// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the proto package.
+const GoGoProtoPackageIsVersion1 = true
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..b3aa391
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+type Sizer interface {
+	Size() int
+}
+
+type ProtoSizer interface {
+	ProtoSize() int
+}
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+	s, ok := m[value]
+	if !ok {
+		s = strconv.Itoa(int(value))
+	}
+	return json.Marshal(s)
+}
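+
+// An illustrative sketch (not part of the original source), again using the
+// FOO_name map from the generated example in the package documentation:
+// known values marshal as their symbolic name, unknown values fall back to
+// the decimal string.
+//
+//	b, _ := proto.MarshalJSONEnum(FOO_name, 17) // b == []byte(`"X"`)
+//	b, _ = proto.MarshalJSONEnum(FOO_name, 99)  // b == []byte(`"99"`)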
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
new file mode 100644
index 0000000..7ffd3c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
@@ -0,0 +1,59 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+)
+
+// TODO: untested, so probably incorrect.
+
+func (p pointer) getRef() pointer {
+	return pointer{v: p.v.Addr()}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// ptrSize is the size of a pointer on the current architecture.
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	}
+	// The interface is not of pointer type. The data word is the pointer
+	// to the data.
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// appendPointer appends q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..aca8eed
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,56 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func (p pointer) getRef() pointer {
+	return pointer{p: (unsafe.Pointer)(&p.p)}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..c9e5fa0
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,599 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
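+
+// Illustrative sketch (not part of the upstream file): tags below
+// tagMapFastLimit are indexed directly in the fastTags slice, while larger
+// tags fall back to the slowTags map. The function name is hypothetical and
+// exists only to show the intended usage.
+func exampleTagMapUsage() {
+	var tm tagMap
+	tm.put(3, 0)      // small tag: stored in the fastTags slice
+	tm.put(100000, 1) // large tag: stored in the slowTags map
+	if fi, ok := tm.get(3); ok {
+		_ = fi // 0
+	}
+	if fi, ok := tm.get(100000); ok {
+		_ = fi // 1
+	}
+}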
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default     string // default value
+	HasDefault  bool   // whether an explicit default was provided
+	CustomType  string
+	CastType    string
+	StdTime     bool
+	StdDuration bool
+	WktPointer  bool
+
+	stype reflect.Type      // set for struct types only
+	ctype reflect.Type      // set for custom types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		case strings.HasPrefix(f, "embedded="):
+			p.OrigName = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "customtype="):
+			p.CustomType = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "casttype="):
+			p.CastType = strings.Split(f, "=")[1]
+		case f == "stdtime":
+			p.StdTime = true
+		case f == "stdduration":
+			p.StdDuration = true
+		case f == "wktptr":
+			p.WktPointer = true
+		}
+	}
+}
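+
+// Illustrative sketch (not part of the upstream file): Parse decodes a
+// generated struct tag such as the one shown in the comment above. The
+// function name is hypothetical.
+func exampleParseTag() {
+	var p Properties
+	p.Parse("bytes,49,opt,name=foo,def=hello!")
+	// p.WireType == WireBytes, p.Tag == 49, p.Optional == true,
+	// p.OrigName == "foo", p.HasDefault == true, p.Default == "hello!"
+	_ = p
+}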
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	isMap := typ.Kind() == reflect.Map
+	if len(p.CustomType) > 0 && !isMap {
+		p.ctype = typ
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdTime && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdDuration && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.WktPointer && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	switch t1 := typ; t1.Kind() {
+	case reflect.Struct:
+		p.stype = typ
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+	case reflect.Slice:
+		switch t2 := t1.Elem(); t2.Kind() {
+		case reflect.Ptr:
+			switch t3 := t2.Elem(); t3.Kind() {
+			case reflect.Struct:
+				p.stype = t3
+			}
+		case reflect.Struct:
+			p.stype = t2
+		}
+
+	case reflect.Map:
+
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+
+		p.MapValProp.CustomType = p.CustomType
+		p.MapValProp.StdDuration = p.StdDuration
+		p.MapValProp.StdTime = p.StdTime
+		p.MapValProp.WktPointer = p.WktPointer
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+	p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
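+
+// Illustrative sketch (not part of the upstream file): repeated lookups for a
+// known type hit the read-locked fast path above; only the first lookup for a
+// type takes the write lock. The type and function names are hypothetical.
+func exampleGetProperties() {
+	type example struct {
+		Name string `protobuf:"bytes,1,opt,name=name"`
+	}
+	sp := GetProperties(reflect.TypeOf(example{}))
+	_ = sp.Prop[0].OrigName // "name"
+}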
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	isOneofMessage := false
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			isOneofMessage = true
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	type oneofMessage interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok {
+		var oots []interface{}
+		_, _, _, oots = om.XXX_OneofFuncs()
+
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+	if _, ok := enumStringMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or a nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
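+
+// Illustrative sketch (not part of the upstream file): generated code
+// registers both directions of an enum mapping, and EnumValueMap exposes the
+// name-to-number table for parsers. The enum name and values are hypothetical.
+func exampleRegisterEnum() {
+	RegisterEnum("example.Color",
+		map[int32]string{0: "RED", 1: "BLUE"},
+		map[string]int32{"RED": 0, "BLUE": 1})
+	_ = EnumValueMap("example.Color") // map[string]int32{"RED": 0, "BLUE": 1}
+}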
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..40ea3dd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,36 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+)
+
+var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
+var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..5a5fd93
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,119 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"io"
+)
+
+func Skip(data []byte) (n int, err error) {
+	l := len(data)
+	index := 0
+	for index < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if index >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[index]
+			index++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				index++
+				if data[index-1] < 0x80 {
+					break
+				}
+			}
+			return index, nil
+		case 1:
+			index += 8
+			return index, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[index]
+				index++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			index += length
+			return index, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = index
+				for shift := uint(0); ; shift += 7 {
+					if index >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[index]
+					index++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := Skip(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				index = start + next
+			}
+			return index, nil
+		case 4:
+			return index, nil
+		case 5:
+			index += 4
+			return index, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
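+
+// Illustrative sketch (not part of the upstream file): Skip consumes exactly
+// one key/value pair and reports how many bytes it spans. The input below is
+// field 1 with wire type varint holding the value 150.
+func exampleSkip() {
+	data := []byte{0x08, 0x96, 0x01} // key byte 0x08, then the varint 150
+	n, err := Skip(data)
+	_ = err // nil
+	_ = n   // 3: one key byte plus two value bytes
+}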
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..9b1538d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -0,0 +1,3006 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the
+// size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should be ONLY called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated),
+// fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		b = append(b, s...)
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.bytesExtensions = invalidField
+	u.sizecache = invalidField
+	isOneofMessage := false
+
+	if reflect.PtrTo(t).Implements(sizerType) {
+		u.hassizer = true
+	}
+	if reflect.PtrTo(t).Implements(protosizerType) {
+		u.hasprotosizer = true
+	}
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			isOneofMessage = true
+		}
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			if f.Type.Kind() == reflect.Map {
+				u.v1extensions = toField(&f)
+			} else {
+				u.bytesExtensions = toField(&f)
+			}
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	// gogo: isOneofMessage is needed for embedded oneof messages that do not have a marshaler and unmarshaler
+	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage {
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	sizr, marshalr := typeMarshaler(t, tags, false, false)
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizr,
+		marshaler: marshalr,
+		isptr:     t.Kind() == reflect.Ptr,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizr,
+			marshaler: marshalr,
+		}
+	}
+}
+
+type oneofMessage interface {
+	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
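+
+// Worked example (not part of the upstream file): the wire tag packs the field
+// number and wire type into a single varint, field<<3 | wiretype. For field 49
+// with "bytes" encoding that is 49<<3 | WireBytes = 394, which SizeVarint
+// reports as a 2-byte tag. The function name is hypothetical.
+func exampleWireTag() {
+	wiretag := uint64(49)<<3 | uint64(WireBytes) // 394
+	tagsize := SizeVarint(uint64(49) << 3)       // 2
+	_, _ = wiretag, tagsize
+}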
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tags[i], "customtype=") {
+			ctype = true
+		}
+		if tags[i] == "stdtime" {
+			isTime = true
+		}
+		if tags[i] == "stdduration" {
+			isDuration = true
+		}
+		if tags[i] == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+	if !proto3 && !pointer && !slice {
+		nozero = false
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+			}
+			if pointer {
+				return makeCustomPtrMarshaler(getMarshalInfo(t))
+			}
+			return makeCustomMarshaler(getMarshalInfo(t))
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeTimePtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeTimePtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeTimeSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeTimeMarshaler(getMarshalInfo(t))
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeDurationPtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeDurationSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeDurationMarshaler(getMarshalInfo(t))
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdDoubleValueMarshaler(getMarshalInfo(t))
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdFloatValueMarshaler(getMarshalInfo(t))
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBoolValueMarshaler(getMarshalInfo(t))
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdStringValueMarshaler(getMarshalInfo(t))
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBytesValueMarshaler(getMarshalInfo(t))
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// Oneof bytes field may also have "proto3" tag.
+			// We want to marshal it as a oneof field. Do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if pointer {
+				if slice {
+					return makeMessageSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageMarshaler(getMarshalInfo(t))
+			} else {
+				if slice {
+					return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageRefMarshaler(getMarshalInfo(t))
+			}
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
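+// Worked example for the packed form above (not part of the upstream file):
+// three packed fixed32 values occupy 4*3 = 12 payload bytes plus
+// SizeVarint(12) = 1 length byte plus the tag bytes, i.e. 13 + tagsize.
+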
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
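+// For illustration, the zigzag transform used by the sizeZigzag* helpers,
+// (v<<1)^(v>>31) for 32-bit values and (v<<1)^(v>>63) for 64-bit values,
+// interleaves signed integers so small magnitudes stay small on the wire:
+//
+//	 0 -> 0
+//	-1 -> 1
+//	 1 -> 2
+//	-2 -> 3
+//	 2 -> 4
+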
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
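+// For illustration, appendFixed32 writes the value least-significant byte
+// first (little-endian), so:
+//
+//	appendFixed32(nil, 0x01020304) // []byte{0x04, 0x03, 0x02, 0x01}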
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
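+// For illustration, each varint byte carries seven bits of the value,
+// least-significant group first, with the high bit set on every byte except
+// the last:
+//
+//	appendVarint(nil, 1)   // []byte{0x01}
+//	appendVarint(nil, 300) // []byte{0xac, 0x02}  (300 = 0b10_0101100)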
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
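+// For illustration, groups are framed by start- and end-group tags that share
+// the field number; the end tag is computed above by adding
+// WireEndGroup-WireStartGroup to the start tag. For example, a group on
+// field 5 is framed as:
+//
+//	0x2b ...group fields... 0x2c // (5<<3)|WireStartGroup, (5<<3)|WireEndGroup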
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	stdOptions := false
+	for _, t := range tags {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is the pointer to
+	// the value.
+	// The key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic, so use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size cache,
+	// but it cannot be nested either, so just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)             // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
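+// For illustration, each map entry above is encoded as its own
+// length-delimited submessage whose key is field 1 and value is field 2.
+// Assuming the map field itself is numbered 4, map[int32]string{7: "hi"}
+// is encoded as:
+//
+//	0x22 0x06  // field 4, length-delimited, 6 bytes follow
+//	0x08 0x07  // key:   field 1, varint 7
+//	0x12 0x02  // value: field 2, length-delimited, 2 bytes
+//	0x68 0x69  // "hi"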
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface. We need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for an XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals an XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
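+//
+// For illustration, a single item with type_id 100 and a two-byte message
+// payload is encoded as:
+//
+//	0x0b       // field 1, start group
+//	0x10 0x64  // field 2, varint 100 (type_id)
+//	0x1a 0x02  // field 3, length-delimited, 2 bytes (message)
+//	...        // the two payload bytes
+//	0x0c       // field 1, end group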
+
+// sizeMessageSet computes the size of encoded data for an XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals an XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
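+// For illustration, typical use of Size and Marshal with a generated message
+// type (pb.Item here is a hypothetical example):
+//
+//	msg := &pb.Item{Name: "x"}
+//	n := proto.Size(msg)            // encoded size in bytes
+//	data, err := proto.Marshal(msg) // wire-format bytes, len(data) == n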
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if p.deterministic {
+		if _, ok := pb.(Marshaler); ok {
+			return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
+		}
+	}
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		var b []byte
+		b, err = m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
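+// For illustration, grow doubles the current length but never allocates less
+// than is needed: with len(p.buf) == 100 and cap(p.buf) == 100, grow(50)
+// reallocates to capacity 200, while grow(150) reallocates to capacity 250.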
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
new file mode 100644
index 0000000..997f57c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
@@ -0,0 +1,388 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler:
+// it marshals a message T instead of a *T.
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			siz := u.size(ptr)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler:
+// it marshals a slice of messages []T instead of []*T.
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
+						if errreq == nil {
+							errreq = err
+						}
+						continue
+					}
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+
+			return b, errreq
+		}
+}
+
+func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
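All of the time and duration marshalers above share the same framing: convert the Go value to its well-known-type message, marshal it, then emit the wire tag, a length varint, and the payload. The sketch below reproduces just that framing with the standard library, using a hypothetical pre-marshaled payload in place of a real Timestamp or Duration:

package main

import (
	"encoding/binary"
	"fmt"
)

// frameLengthDelimited shows the framing used by the time/duration
// marshalers above: key varint, length varint, then the payload that
// Marshal produced for the Timestamp/Duration message.
func frameLengthDelimited(fieldNum int, payload []byte) []byte {
	key := uint64(fieldNum)<<3 | 2 // wire type 2 = length-delimited
	b := binary.AppendUvarint(nil, key)
	b = binary.AppendUvarint(b, uint64(len(payload)))
	return append(b, payload...)
}

func main() {
	// Hypothetical payload standing in for an already-marshaled Duration.
	payload := []byte{0x08, 0x03} // seconds = 3
	fmt.Printf("% x\n", frameLengthDelimited(1, payload)) // 0a 02 08 03
}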
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..f520106
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -0,0 +1,657 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int64{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					mergeInfo.merge(dst, src)
+				}
+			case isSlice: // E.g., []*pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mergeInfo.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mergeInfo.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
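The merge rules installed by computeMergeInfo can be summarized briefly: a non-zero scalar in src overwrites dst, slices are appended, and byte slices (and map values) are copied rather than aliased. The toy sketch below mirrors those rules on a plain struct; it is an illustration only, not the reflection-driven merge above:

package main

import "fmt"

// toy stands in for a generated message with a scalar, a repeated
// field, and a bytes field.
type toy struct {
	Count int64
	Tags  []string
	Blob  []byte
}

// toyMerge applies the same per-field rules computeMergeInfo installs.
func toyMerge(dst, src *toy) {
	if src.Count != 0 { // non-zero scalar in src wins
		dst.Count = src.Count
	}
	if src.Tags != nil { // repeated fields are appended
		dst.Tags = append(dst.Tags, src.Tags...)
	}
	if src.Blob != nil { // bytes are copied, never aliased
		dst.Blob = append([]byte{}, src.Blob...)
	}
}

func main() {
	dst := &toy{Count: 1, Tags: []string{"a"}}
	src := &toy{Count: 7, Tags: []string{"b"}, Blob: []byte{1, 2}}
	toyMerge(dst, src)
	fmt.Println(dst.Count, dst.Tags, dst.Blob) // 7 [a b] [1 2]
}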
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..bb2622f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2245 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+
+	bytesExtensions field // offset of XXX_extensions with type []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.bytesExtensions.IsValid() {
+					z = m.offset(u.bytesExtensions).toBytes()
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
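The hot loop above begins every field by splitting a varint key into a field number (x >> 3) and a wire type (x & 7) before dispatching into u.dense or u.sparse. A minimal standalone sketch of that split, using encoding/binary on a hand-built buffer:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Field 2, wire type 2 (length-delimited), e.g. a string field:
	// key = 2<<3 | 2 = 0x12, followed by length 3 and "abc".
	b := []byte{0x12, 0x03, 'a', 'b', 'c'}

	key, n := binary.Uvarint(b)
	tag, wire := key>>3, key&7 // same split the unmarshal loop performs
	fmt.Println(tag, wire, n)  // 2 2 1
}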
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+	u.bytesExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
+				u.oldExtensions = toField(&f)
+				continue
+			} else if f.Type == reflect.TypeOf(([]byte)(nil)) {
+				u.bytesExtensions = toField(&f)
+				continue
+			}
+			panic("bad type for XXX_extensions field: " + f.Type.Name())
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag does not have enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
+	// gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler
+	if fn.IsValid() && len(oneofFields) > 0 {
+		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
+		for i := res.Len() - 1; i >= 0; i-- {
+			v := res.Index(i)                             // interface{}
+			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
+			typ := tptr.Elem()                            // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// Might take O(n^2) to process all of the oneofs, but who cares.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tag, "customtype=") {
+			ctype = true
+		}
+		if tag == "stdtime" {
+			isTime = true
+		}
+		if tag == "stdduration" {
+			isDuration = true
+		}
+		if tag == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
+			}
+			if pointer {
+				return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalCustom(getUnmarshalInfo(t), name)
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalTime(getUnmarshalInfo(t), name)
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalDuration(getUnmarshalInfo(t), name)
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			switch encoding {
+			case "bytes":
+				if slice {
+					return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
+				}
+				return makeUnmarshalMessage(getUnmarshalInfo(t), name)
+			}
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
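typeUnmarshaler picks a decoder purely from the struct tag emitted by the code generator: the first element is the encoding, the second the field number, the third the cardinality, and the remainder options such as name=, proto3, stdtime or stdduration. A small sketch of that parsing over a hypothetical tag value:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// A hypothetical generated struct tag of the shape typeUnmarshaler parses.
	tag := "varint,1,opt,name=id,proto3"

	parts := strings.Split(tag, ",")
	encoding, fieldNum := parts[0], parts[1]
	name := ""
	for _, p := range parts[3:] { // options follow encoding, number, cardinality
		if strings.HasPrefix(p, "name=") {
			name = strings.TrimPrefix(p, "name=")
		}
	}
	fmt.Println(encoding, fieldNum, name) // varint 1 id
}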
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
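The expression int64(x>>1) ^ int64(x)<<63>>63 used by the sint64 decoders is zigzag decoding, which maps 0, 1, 2, 3, ... back to 0, -1, 1, -2, ... so that small negative numbers stay small on the wire. A round-trip sketch:

package main

import "fmt"

// zigzag64 encodes a signed value so small negatives stay small varints.
func zigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// unzigzag64 is the inverse; the same expression appears in unmarshalSint64Value.
func unzigzag64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150, -150} {
		x := zigzag64(v)
		fmt.Println(v, "->", x, "->", unzigzag64(x))
	}
}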
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
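+	// The decoded string is stored even when it is not valid UTF-8; the
+	// caller still receives errInvalidUTF8 along with the remaining bytes.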
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	for _, t := range tagArray {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
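+			// A map entry is a nested message: field 1 holds the key, field 2 holds the value.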
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
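+		// j marks the start of this tag; if it turns out to be the matching
+		// EndGroup, its start and end offsets are returned below.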
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
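+	// Emit seven bits at a time, least-significant group first, with the high
+	// (continuation) bit set on every byte except the last.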
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
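+// The body is unrolled by hand: a 64-bit varint is at most 10 bytes long,
+// and the tenth byte may contribute only its lowest bit.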
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
new file mode 100644
index 0000000..00d6c7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
@@ -0,0 +1,385 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..0407ba8
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,928 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any characters except [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if len(props.Enum) > 0 {
+					if err := tm.writeEnum(w, v, props); err != nil {
+						return err
+					}
+				} else if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		if len(props.Enum) > 0 {
+			if err := tm.writeEnum(w, fv, props); err != nil {
+				return err
+			}
+		} else if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv
+	if pv.CanAddr() {
+		pv = sv.Addr()
+	} else {
+		pv = reflect.New(sv.Type())
+		pv.Elem().Set(sv)
+	}
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if len(props.CastType) > 0 {
+			if _, ok := v.Interface().(interface {
+				String() string
+			}); ok {
+				switch v.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					_, err := fmt.Fprintf(w, "%d", v.Interface())
+					return err
+				}
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
+			if err != nil {
+				return err
+			}
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
+			}
+			dproto := durationProto(d)
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+			return err
+		}
+	}
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+			return ferr
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, werr := w.Write(endBraceNewline); werr != nil {
+				return werr
+			}
+			continue
+		}
+		if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+			return ferr
+		}
+		if wire != WireStartGroup {
+			if err = w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err = w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	e := pv.Interface().(Message)
+
+	var m map[int32]Extension
+	var mu sync.Locker
+	if em, ok := e.(extensionsBytes); ok {
+		eb := em.GetExtensions()
+		var err error
+		m, err = BytesToExtensionsMap(*eb)
+		if err != nil {
+			return err
+		}
+		mu = notLocker{}
+	} else if _, ok := e.(extendableProto); ok {
+		ep, _ := extendable(e)
+		m, mu = ep.extensionsRead()
+		if m == nil {
+			return nil
+		}
+	}
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(e, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
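
For reference, a minimal usage sketch of the text-format API added above. The generated message type mypb.Device, its Id field, and the import path example.com/mypb are hypothetical placeholders for any gogo-generated message; everything else uses only the functions defined in this file.

// Sketch only: mypb is a hypothetical generated package.
package main

import (
	"os"

	"github.com/gogo/protobuf/proto"
	mypb "example.com/mypb" // hypothetical generated package
)

func main() {
	d := &mypb.Device{Id: "dev-1"} // hypothetical message and field

	// Multi-line text format, written to stdout.
	_ = proto.MarshalText(os.Stdout, d)

	// One-line (compact) form, returned as a string.
	line := proto.CompactTextString(d)
	_ = line

	// Equivalent, with explicit marshaler configuration.
	tm := proto.TextMarshaler{Compact: true, ExpandAny: true}
	_ = tm.Text(d)
}
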
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..1d6c6aa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,57 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No string map is registered for this enum; fall back to the numeric value.
+		return tm.writeAny(w, v, props)
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		// Unknown enum value; fall back to the numeric value.
+		return tm.writeAny(w, v, props)
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..1ce0be2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
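
A hedged sketch of an in-package test (hypothetical file and test name) exercising unquoteC with the escape forms handled by unescape above; the expected outputs follow directly from the switch cases (simple escapes, three-digit octal, \x hex bytes, and \u code points).

// Hypothetical in-package test; placed alongside text_parser.go it would
// exercise the unexported unquoteC directly.
package proto

import "testing"

func TestUnquoteCSketch(t *testing.T) {
	cases := []struct{ in, want string }{
		{`plain`, "plain"},  // no escapes: returned as-is
		{`a\nb`, "a\nb"},    // simple escape
		{`\101`, "A"},       // octal: three digits, one byte
		{`\x41`, "A"},       // hex: two digits, one byte
		{`\u00e9`, "é"},     // \u: four hex digits, one code point
	}
	for _, c := range cases {
		got, err := unquoteC(c.in, '"')
		if err != nil || got != c.want {
			t.Errorf("unquoteC(%q) = %q, %v; want %q", c.in, got, err, c.want)
		}
	}
}
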
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				ntok := p.next()
+				if ntok.err != nil {
+					return ntok.err
+				}
+				if ntok.value == "]" {
+					break
+				}
+				if ntok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", ntok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int8:
+		if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int16:
+		if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint8:
+		if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint16:
+		if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
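
A minimal round-trip sketch for UnmarshalText; as in the earlier sketch, mypb.Device and its Id field are hypothetical placeholders for a generated message type.

// Sketch only: mypb is a hypothetical generated package.
package main

import (
	"log"

	"github.com/gogo/protobuf/proto"
	mypb "example.com/mypb" // hypothetical generated package
)

func main() {
	src := &mypb.Device{Id: "dev-1"} // hypothetical message and field
	text := proto.MarshalTextString(src)

	dst := &mypb.Device{}
	if err := proto.UnmarshalText(text, dst); err != nil {
		log.Fatalf("UnmarshalText: %v", err)
	}
	// dst now holds the same field values as src.
}
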
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
new file mode 100644
index 0000000..9324f65
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// timestampProto converts a time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
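
Since timestampFromProto and timestampProto are unexported, the sketch below replicates their seconds/nanos arithmetic using only the standard library; it is an illustration of the conversion math, not a call into this package.

// Standalone sketch of the seconds/nanos split used above.
package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2019, 6, 1, 12, 0, 0, 500000000, time.UTC)

	// Forward: time.Time -> (seconds, nanos), as in timestampProto.
	seconds := t.Unix()
	nanos := int32(t.Sub(time.Unix(seconds, 0))) // sub-second remainder in nanoseconds

	// Back: (seconds, nanos) -> time.Time, as in timestampFromProto.
	back := time.Unix(seconds, int64(nanos)).UTC()

	fmt.Println(seconds, nanos, back.Equal(t)) // 1559390400 500000000 true
}
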
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..38439fa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp<string>" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..b175d1b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go
@@ -0,0 +1,1888 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
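+
+// Illustrative sketch only (not part of the upstream file): every marshaler
+// above emits the same framing for one wrapper element, namely the field's
+// wiretag, a length varint, and then the encoded wrapper message whose single
+// field 1 carries the value. The field number 3 below is purely hypothetical.
+func exampleWrapperFraming() ([]byte, error) {
+	v := &uint64Value{Value: 150}
+	buf, err := Marshal(v) // encodes field 1 as a varint: 08 96 01
+	if err != nil {
+		return nil, err
+	}
+	wiretag := uint64(3<<3 | WireBytes) // hypothetical field number 3, length-delimited
+	b := appendVarint(nil, wiretag)
+	b = appendVarint(b, uint64(len(buf)))
+	return append(b, buf...), nil // 1a 03 08 96 01
+}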
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
new file mode 100644
index 0000000..c1cf7bf
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
@@ -0,0 +1,113 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+type float64Value struct {
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float64Value) Reset()       { *m = float64Value{} }
+func (*float64Value) ProtoMessage()  {}
+func (*float64Value) String() string { return "float64<string>" }
+
+type float32Value struct {
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float32Value) Reset()       { *m = float32Value{} }
+func (*float32Value) ProtoMessage()  {}
+func (*float32Value) String() string { return "float32<string>" }
+
+type int64Value struct {
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int64Value) Reset()       { *m = int64Value{} }
+func (*int64Value) ProtoMessage()  {}
+func (*int64Value) String() string { return "int64<string>" }
+
+type uint64Value struct {
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint64Value) Reset()       { *m = uint64Value{} }
+func (*uint64Value) ProtoMessage()  {}
+func (*uint64Value) String() string { return "uint64<string>" }
+
+type int32Value struct {
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int32Value) Reset()       { *m = int32Value{} }
+func (*int32Value) ProtoMessage()  {}
+func (*int32Value) String() string { return "int32<string>" }
+
+type uint32Value struct {
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint32Value) Reset()       { *m = uint32Value{} }
+func (*uint32Value) ProtoMessage()  {}
+func (*uint32Value) String() string { return "uint32<string>" }
+
+type boolValue struct {
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *boolValue) Reset()       { *m = boolValue{} }
+func (*boolValue) ProtoMessage()  {}
+func (*boolValue) String() string { return "bool<string>" }
+
+type stringValue struct {
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *stringValue) Reset()       { *m = stringValue{} }
+func (*stringValue) ProtoMessage()  {}
+func (*stringValue) String() string { return "string<string>" }
+
+type bytesValue struct {
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *bytesValue) Reset()       { *m = bytesValue{} }
+func (*bytesValue) ProtoMessage()  {}
+func (*bytesValue) String() string { return "[]byte<string>" }
+
+func init() {
+	RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
+	RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
+	RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
+	RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
+	RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
+	RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
+	RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
+	RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
+	RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
+}
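+
+// Illustrative sketch only (not part of the upstream file): the wrapper
+// messages above behave like any other Message in this package, so they can
+// be marshaled, unmarshaled, and looked up under the names registered in init.
+func exampleWrapperRoundTrip() (int64, string, error) {
+	in := &int64Value{Value: 42}
+	buf, err := Marshal(in) // encodes field 1 as a varint: 08 2a
+	if err != nil {
+		return 0, "", err
+	}
+	out := &int64Value{}
+	if err := Unmarshal(buf, out); err != nil {
+		return 0, "", err
+	}
+	return out.Value, MessageName(out), nil // 42, "gogo.protobuf.proto.Int64Value"
+}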
diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
new file mode 100644
index 0000000..ceadde6
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go
@@ -0,0 +1,101 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package sortkeys
+
+import (
+	"sort"
+)
+
+func Strings(l []string) {
+	sort.Strings(l)
+}
+
+func Float64s(l []float64) {
+	sort.Float64s(l)
+}
+
+func Float32s(l []float32) {
+	sort.Sort(Float32Slice(l))
+}
+
+func Int64s(l []int64) {
+	sort.Sort(Int64Slice(l))
+}
+
+func Int32s(l []int32) {
+	sort.Sort(Int32Slice(l))
+}
+
+func Uint64s(l []uint64) {
+	sort.Sort(Uint64Slice(l))
+}
+
+func Uint32s(l []uint32) {
+	sort.Sort(Uint32Slice(l))
+}
+
+func Bools(l []bool) {
+	sort.Sort(BoolSlice(l))
+}
+
+type BoolSlice []bool
+
+func (p BoolSlice) Len() int           { return len(p) }
+func (p BoolSlice) Less(i, j int) bool { return !p[i] && p[j] }
+func (p BoolSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int64Slice []int64
+
+func (p Int64Slice) Len() int           { return len(p) }
+func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Int32Slice []int32
+
+func (p Int32Slice) Len() int           { return len(p) }
+func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Int32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Uint32Slice []uint32
+
+func (p Uint32Slice) Len() int           { return len(p) }
+func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+type Float32Slice []float32
+
+func (p Float32Slice) Len() int           { return len(p) }
+func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Float32Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
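+
+// Illustrative sketch only (not part of the upstream file): each helper is a
+// thin wrapper over package sort that orders keys ascending, with false
+// sorting before true for bools.
+func exampleSortKeys() ([]uint64, []bool) {
+	u := []uint64{3, 1, 2}
+	Uint64s(u) // u is now [1 2 3]
+	b := []bool{true, false, true}
+	Bools(b) // b is now [false true true]
+	return u, b
+}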
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/golang/glog/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
new file mode 100644
index 0000000..387b4eb
--- /dev/null
+++ b/vendor/github.com/golang/glog/README
@@ -0,0 +1,44 @@
+glog
+====
+
+Leveled execution logs for Go.
+
+This is an efficient pure Go implementation of leveled logs in the
+manner of the open source C++ package
+	https://github.com/google/glog
+
+By binding methods to booleans it is possible to use the log package
+without paying the expense of evaluating the arguments to the log.
+Through the -vmodule flag, the package also provides fine-grained
+control over logging at the file level.
+
+The comment from glog.go introduces the ideas:
+
+	Package glog implements logging analogous to the Google-internal
+	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
+	Error, Fatal, plus formatting variants such as Infof. It
+	also provides V-style logging controlled by the -v and
+	-vmodule=file=2 flags.
+	
+	Basic examples:
+	
+		glog.Info("Prepare to repel boarders")
+	
+		glog.Fatalf("Initialization failed: %s", err)
+	
+	See the documentation for the V function for an explanation
+	of these examples:
+	
+		if glog.V(2) {
+			glog.Info("Starting transaction...")
+		}
+	
+		glog.V(2).Infoln("Processed", nItems, "elements")
+
+
+The repository contains an open source version of the log package
+used inside Google. The master copy of the source lives inside
+Google, not here. The code in this repo is for export only and is not itself
+under development. Feature requests will be ignored.
+
+Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
new file mode 100644
index 0000000..54bd7af
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog.go
@@ -0,0 +1,1180 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
+// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
+// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
+//
+// Basic examples:
+//
+//	glog.Info("Prepare to repel boarders")
+//
+//	glog.Fatalf("Initialization failed: %s", err)
+//
+// See the documentation for the V function for an explanation of these examples:
+//
+//	if glog.V(2) {
+//		glog.Info("Starting transaction...")
+//	}
+//
+//	glog.V(2).Infoln("Processed", nItems, "elements")
+//
+// Log output is buffered and written periodically using Flush. Programs
+// should call Flush before exiting to guarantee all log output is written.
+//
+// By default, all log statements write to files in a temporary directory.
+// This package provides several flags that modify this behavior.
+// As a result, flag.Parse must be called before any logging is done.
+//
+//	-logtostderr=false
+//		Logs are written to standard error instead of to files.
+//	-alsologtostderr=false
+//		Logs are written to standard error as well as to files.
+//	-stderrthreshold=ERROR
+//		Log events at or above this severity are logged to standard
+//		error as well as to files.
+//	-log_dir=""
+//		Log files will be written to this directory instead of the
+//		default temporary directory.
+//
+//	Other flags provide aids to debugging.
+//
+//	-log_backtrace_at=""
+//		When set to a file and line number holding a logging statement,
+//		such as
+//			-log_backtrace_at=gopherflakes.go:234
+//		a stack trace will be written to the Info log whenever execution
+//		hits that statement. (Unlike with -vmodule, the ".go" must be
+//		present.)
+//	-v=0
+//		Enable V-leveled logging at the specified level.
+//	-vmodule=""
+//		The syntax of the argument is a comma-separated list of pattern=N,
+//		where pattern is a literal file name (minus the ".go" suffix) or
+//		"glob" pattern and N is a V level. For instance,
+//			-vmodule=gopher*=3
+//		sets the V level to 3 in all Go files whose names begin "gopher".
+//
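+// As a sketch (not taken from the upstream documentation), a typical program
+// wires these pieces together as follows:
+//
+//	func main() {
+//		flag.Parse()       // glog registers its flags in an init function
+//		defer glog.Flush() // write any buffered log output before exiting
+//		glog.Info("starting up")
+//		if glog.V(1) {
+//			glog.Info("verbose diagnostics enabled")
+//		}
+//	}
+//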
+package glog
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"flag"
+	"fmt"
+	"io"
+	stdLog "log"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// severity identifies the sort of log: info, warning etc. It also implements
+// the flag.Value interface. The -stderrthreshold flag is of type severity and
+// should be modified only through the flag.Value interface. The values match
+// the corresponding constants in C++.
+type severity int32 // sync/atomic int32
+
+// These constants identify the log levels in order of increasing severity.
+// A message written to a high-severity log file is also written to each
+// lower-severity log file.
+const (
+	infoLog severity = iota
+	warningLog
+	errorLog
+	fatalLog
+	numSeverity = 4
+)
+
+const severityChar = "IWEF"
+
+var severityName = []string{
+	infoLog:    "INFO",
+	warningLog: "WARNING",
+	errorLog:   "ERROR",
+	fatalLog:   "FATAL",
+}
+
+// get returns the value of the severity.
+func (s *severity) get() severity {
+	return severity(atomic.LoadInt32((*int32)(s)))
+}
+
+// set sets the value of the severity.
+func (s *severity) set(val severity) {
+	atomic.StoreInt32((*int32)(s), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (s *severity) String() string {
+	return strconv.FormatInt(int64(*s), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (s *severity) Get() interface{} {
+	return *s
+}
+
+// Set is part of the flag.Value interface.
+func (s *severity) Set(value string) error {
+	var threshold severity
+	// Is it a known name?
+	if v, ok := severityByName(value); ok {
+		threshold = v
+	} else {
+		v, err := strconv.Atoi(value)
+		if err != nil {
+			return err
+		}
+		threshold = severity(v)
+	}
+	logging.stderrThreshold.set(threshold)
+	return nil
+}
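+
+// For illustration (the flag itself is registered in init below), Set accepts
+// either a severity name or its numeric value, so both of these hypothetical
+// command lines select the same threshold:
+//
+//	-stderrthreshold=WARNING
+//	-stderrthreshold=1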
+
+func severityByName(s string) (severity, bool) {
+	s = strings.ToUpper(s)
+	for i, name := range severityName {
+		if name == s {
+			return severity(i), true
+		}
+	}
+	return 0, false
+}
+
+// OutputStats tracks the number of output lines and bytes written.
+type OutputStats struct {
+	lines int64
+	bytes int64
+}
+
+// Lines returns the number of lines written.
+func (s *OutputStats) Lines() int64 {
+	return atomic.LoadInt64(&s.lines)
+}
+
+// Bytes returns the number of bytes written.
+func (s *OutputStats) Bytes() int64 {
+	return atomic.LoadInt64(&s.bytes)
+}
+
+// Stats tracks the number of lines of output and number of bytes
+// per severity level. Values must be read with atomic.LoadInt64.
+var Stats struct {
+	Info, Warning, Error OutputStats
+}
+
+var severityStats = [numSeverity]*OutputStats{
+	infoLog:    &Stats.Info,
+	warningLog: &Stats.Warning,
+	errorLog:   &Stats.Error,
+}
+
+// Level is exported because it appears in the arguments to V and is
+// the type of the v flag, which can be set programmatically.
+// It's a distinct type because we want to discriminate it from logType.
+// Variables of type level are only changed under logging.mu.
+// The -v flag is read only with atomic ops, so the state of the logging
+// module is consistent.
+
+// Level is treated as a sync/atomic int32.
+
+// Level specifies a level of verbosity for V logs. *Level implements
+// flag.Value; the -v flag is of type Level and should be modified
+// only through the flag.Value interface.
+type Level int32
+
+// get returns the value of the Level.
+func (l *Level) get() Level {
+	return Level(atomic.LoadInt32((*int32)(l)))
+}
+
+// set sets the value of the Level.
+func (l *Level) set(val Level) {
+	atomic.StoreInt32((*int32)(l), int32(val))
+}
+
+// String is part of the flag.Value interface.
+func (l *Level) String() string {
+	return strconv.FormatInt(int64(*l), 10)
+}
+
+// Get is part of the flag.Value interface.
+func (l *Level) Get() interface{} {
+	return *l
+}
+
+// Set is part of the flag.Value interface.
+func (l *Level) Set(value string) error {
+	v, err := strconv.Atoi(value)
+	if err != nil {
+		return err
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(Level(v), logging.vmodule.filter, false)
+	return nil
+}
+
+// moduleSpec represents the setting of the -vmodule flag.
+type moduleSpec struct {
+	filter []modulePat
+}
+
+// modulePat contains a filter for the -vmodule flag.
+// It holds a verbosity level and a file pattern to match.
+type modulePat struct {
+	pattern string
+	literal bool // The pattern is a literal string
+	level   Level
+}
+
+// match reports whether the file matches the pattern. It uses a string
+// comparison if the pattern contains no metacharacters.
+func (m *modulePat) match(file string) bool {
+	if m.literal {
+		return file == m.pattern
+	}
+	match, _ := filepath.Match(m.pattern, file)
+	return match
+}
+
+func (m *moduleSpec) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	var b bytes.Buffer
+	for i, f := range m.filter {
+		if i > 0 {
+			b.WriteRune(',')
+		}
+		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
+	}
+	return b.String()
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (m *moduleSpec) Get() interface{} {
+	return nil
+}
+
+var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
+
+// Syntax: -vmodule=recordio=2,file=1,gfs*=3
+func (m *moduleSpec) Set(value string) error {
+	var filter []modulePat
+	for _, pat := range strings.Split(value, ",") {
+		if len(pat) == 0 {
+			// Empty strings such as from a trailing comma can be ignored.
+			continue
+		}
+		patLev := strings.Split(pat, "=")
+		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
+			return errVmoduleSyntax
+		}
+		pattern := patLev[0]
+		v, err := strconv.Atoi(patLev[1])
+		if err != nil {
+			return errors.New("syntax error: expect comma-separated list of filename=N")
+		}
+		if v < 0 {
+			return errors.New("negative value for vmodule level")
+		}
+		if v == 0 {
+			continue // Ignore. It's harmless but no point in paying the overhead.
+		}
+		// TODO: check syntax of filter?
+		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	logging.setVState(logging.verbosity, filter, true)
+	return nil
+}
+
+// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
+// that require filepath.Match to be called to match the pattern.
+func isLiteral(pattern string) bool {
+	return !strings.ContainsAny(pattern, `\*?[]`)
+}
+
+// traceLocation represents the setting of the -log_backtrace_at flag.
+type traceLocation struct {
+	file string
+	line int
+}
+
+// isSet reports whether the trace location has been specified.
+// logging.mu is held.
+func (t *traceLocation) isSet() bool {
+	return t.line > 0
+}
+
+// match reports whether the specified file and line matches the trace location.
+// The argument file name is the full path, not the basename specified in the flag.
+// logging.mu is held.
+func (t *traceLocation) match(file string, line int) bool {
+	if t.line != line {
+		return false
+	}
+	if i := strings.LastIndex(file, "/"); i >= 0 {
+		file = file[i+1:]
+	}
+	return t.file == file
+}
+
+func (t *traceLocation) String() string {
+	// Lock because the type is not atomic. TODO: clean this up.
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	return fmt.Sprintf("%s:%d", t.file, t.line)
+}
+
+// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
+// struct is not exported.
+func (t *traceLocation) Get() interface{} {
+	return nil
+}
+
+var errTraceSyntax = errors.New("syntax error: expect file.go:234")
+
+// Syntax: -log_backtrace_at=gopherflakes.go:234
+// Note that unlike vmodule the file extension is included here.
+func (t *traceLocation) Set(value string) error {
+	if value == "" {
+		// Unset.
+		t.line = 0
+		t.file = ""
+		return nil
+	}
+	fields := strings.Split(value, ":")
+	if len(fields) != 2 {
+		return errTraceSyntax
+	}
+	file, line := fields[0], fields[1]
+	if !strings.Contains(file, ".") {
+		return errTraceSyntax
+	}
+	v, err := strconv.Atoi(line)
+	if err != nil {
+		return errTraceSyntax
+	}
+	if v <= 0 {
+		return errors.New("negative or zero value for level")
+	}
+	logging.mu.Lock()
+	defer logging.mu.Unlock()
+	t.line = v
+	t.file = file
+	return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+	Flush() error
+	Sync() error
+	io.Writer
+}
+
+func init() {
+	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+	flag.Var(&logging.verbosity, "v", "log level for V logs")
+	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+	// Default stderrThreshold is ERROR.
+	logging.stderrThreshold = errorLog
+
+	logging.setVState(0, nil, false)
+	go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+	logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+	// Boolean flags. Not handled atomically because the flag.Value interface
+	// does not let us avoid the =true, and that shorthand is necessary for
+	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
+	toStderr     bool // The -logtostderr flag.
+	alsoToStderr bool // The -alsologtostderr flag.
+
+	// Level flag. Handled atomically.
+	stderrThreshold severity // The -stderrthreshold flag.
+
+	// freeList is a list of byte buffers, maintained under freeListMu.
+	freeList *buffer
+	// freeListMu maintains the free list. It is separate from the main mutex
+	// so buffers can be grabbed and printed to without holding the main lock,
+	// for better parallelization.
+	freeListMu sync.Mutex
+
+	// mu protects the remaining elements of this structure and is
+	// used to synchronize logging.
+	mu sync.Mutex
+	// file holds writer for each of the log types.
+	file [numSeverity]flushSyncWriter
+	// pcs is used in V to avoid an allocation when computing the caller's PC.
+	pcs [1]uintptr
+	// vmap is a cache of the V Level for each V() call site, identified by PC.
+	// It is wiped whenever the vmodule flag changes state.
+	vmap map[uintptr]Level
+	// filterLength stores the length of the vmodule filter chain. If greater
+	// than zero, it means vmodule is enabled. It may be read safely
+	// using sync.LoadInt32, but is only modified under mu.
+	filterLength int32
+	// traceLocation is the state of the -log_backtrace_at flag.
+	traceLocation traceLocation
+	// These flags are modified only under lock, although verbosity may be fetched
+	// safely using atomic.LoadInt32.
+	vmodule   moduleSpec // The state of the -vmodule flag.
+	verbosity Level      // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+	bytes.Buffer
+	tmp  [64]byte // temporary byte array for creating headers.
+	next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held.
+func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
+	// Turn verbosity off so V will not fire while we are in transition.
+	logging.verbosity.set(0)
+	// Ditto for filter length.
+	atomic.StoreInt32(&logging.filterLength, 0)
+
+	// Set the new filters and wipe the pc->Level map if the filter has changed.
+	if setFilter {
+		logging.vmodule.filter = filter
+		logging.vmap = make(map[uintptr]Level)
+	}
+
+	// Things are consistent now, so enable filtering and verbosity.
+	// They are enabled in order opposite to that in V.
+	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
+	logging.verbosity.set(verbosity)
+}
+
+// getBuffer returns a new, ready-to-use buffer.
+func (l *loggingT) getBuffer() *buffer {
+	l.freeListMu.Lock()
+	b := l.freeList
+	if b != nil {
+		l.freeList = b.next
+	}
+	l.freeListMu.Unlock()
+	if b == nil {
+		b = new(buffer)
+	} else {
+		b.next = nil
+		b.Reset()
+	}
+	return b
+}
+
+// putBuffer returns a buffer to the free list.
+func (l *loggingT) putBuffer(b *buffer) {
+	if b.Len() >= 256 {
+		// Let big buffers die a natural death.
+		return
+	}
+	l.freeListMu.Lock()
+	b.next = l.freeList
+	l.freeList = b
+	l.freeListMu.Unlock()
+}
+
+var timeNow = time.Now // Stubbed out for testing.
+
+/*
+header formats a log header as defined by the C++ implementation.
+It returns a buffer containing the formatted header and the user's file and line number.
+The depth specifies how many stack frames above lives the source line to be identified in the log message.
+
+Log lines have this form:
+	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
+where the fields are defined as follows:
+	L                A single character, representing the log level (eg 'I' for INFO)
+	mm               The month (zero padded; ie May is '05')
+	dd               The day (zero padded)
+	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
+	threadid         The space-padded thread ID as returned by GetTID()
+	file             The file name
+	line             The line number
+	msg              The user-supplied message
+*/
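+// As an illustration only (not produced verbatim by this file), an INFO line
+// logged on February 3rd at 15:04:05 by process 1234 from main.go:42 would be
+// prefixed roughly like:
+//	I0203 15:04:05.123456    1234 main.go:42] message text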
+func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
+	_, file, line, ok := runtime.Caller(3 + depth)
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	return l.formatHeader(s, file, line), file, line
+}
+
+// formatHeader formats a log header using the provided file name and line number.
+func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
+	now := timeNow()
+	if line < 0 {
+		line = 0 // not a real line number, but acceptable to someDigits
+	}
+	if s > fatalLog {
+		s = infoLog // for safety.
+	}
+	buf := l.getBuffer()
+
+	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
+	// It's worth about 3X. Fprintf is hard.
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	buf.tmp[0] = severityChar[s]
+	buf.twoDigits(1, int(month))
+	buf.twoDigits(3, day)
+	buf.tmp[5] = ' '
+	buf.twoDigits(6, hour)
+	buf.tmp[8] = ':'
+	buf.twoDigits(9, minute)
+	buf.tmp[11] = ':'
+	buf.twoDigits(12, second)
+	buf.tmp[14] = '.'
+	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
+	buf.tmp[21] = ' '
+	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
+	buf.tmp[29] = ' '
+	buf.Write(buf.tmp[:30])
+	buf.WriteString(file)
+	buf.tmp[0] = ':'
+	n := buf.someDigits(1, line)
+	buf.tmp[n+1] = ']'
+	buf.tmp[n+2] = ' '
+	buf.Write(buf.tmp[:n+3])
+	return buf
+}
+
+// Some custom tiny helper functions to print the log header efficiently.
+
+const digits = "0123456789"
+
+// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
+func (buf *buffer) twoDigits(i, d int) {
+	buf.tmp[i+1] = digits[d%10]
+	d /= 10
+	buf.tmp[i] = digits[d%10]
+}
+
+// nDigits formats an n-digit integer at buf.tmp[i],
+// padding with pad on the left.
+// It assumes d >= 0.
+func (buf *buffer) nDigits(n, i, d int, pad byte) {
+	j := n - 1
+	for ; j >= 0 && d > 0; j-- {
+		buf.tmp[i+j] = digits[d%10]
+		d /= 10
+	}
+	for ; j >= 0; j-- {
+		buf.tmp[i+j] = pad
+	}
+}
+
+// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
+func (buf *buffer) someDigits(i, d int) int {
+	// Print into the top, then copy down. We know there's space for at least
+	// a 10-digit number.
+	j := len(buf.tmp)
+	for {
+		j--
+		buf.tmp[j] = digits[d%10]
+		d /= 10
+		if d == 0 {
+			break
+		}
+	}
+	return copy(buf.tmp[i:], buf.tmp[j:])
+}
+
+func (l *loggingT) println(s severity, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintln(buf, args...)
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) print(s severity, args ...interface{}) {
+	l.printDepth(s, 1, args...)
+}
+
+func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
+	buf, file, line := l.header(s, depth)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+func (l *loggingT) printf(s severity, format string, args ...interface{}) {
+	buf, file, line := l.header(s, 0)
+	fmt.Fprintf(buf, format, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, false)
+}
+
+// printWithFileLine behaves like print but uses the provided file and line number.  If
+// alsoToStderr is true, the log message always appears on standard error; it
+// will also appear in the log file unless --logtostderr is set.
+func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
+	buf := l.formatHeader(s, file, line)
+	fmt.Fprint(buf, args...)
+	if buf.Bytes()[buf.Len()-1] != '\n' {
+		buf.WriteByte('\n')
+	}
+	l.output(s, buf, file, line, alsoToStderr)
+}
+
+// output writes the data to the log files and releases the buffer.
+func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
+	l.mu.Lock()
+	if l.traceLocation.isSet() {
+		if l.traceLocation.match(file, line) {
+			buf.Write(stacks(false))
+		}
+	}
+	data := buf.Bytes()
+	if !flag.Parsed() {
+		os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
+		os.Stderr.Write(data)
+	} else if l.toStderr {
+		os.Stderr.Write(data)
+	} else {
+		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
+			os.Stderr.Write(data)
+		}
+		if l.file[s] == nil {
+			if err := l.createFiles(s); err != nil {
+				os.Stderr.Write(data) // Make sure the message appears somewhere.
+				l.exit(err)
+			}
+		}
+		switch s {
+		case fatalLog:
+			l.file[fatalLog].Write(data)
+			fallthrough
+		case errorLog:
+			l.file[errorLog].Write(data)
+			fallthrough
+		case warningLog:
+			l.file[warningLog].Write(data)
+			fallthrough
+		case infoLog:
+			l.file[infoLog].Write(data)
+		}
+	}
+	if s == fatalLog {
+		// If we got here via Exit rather than Fatal, print no stacks.
+		if atomic.LoadUint32(&fatalNoStacks) > 0 {
+			l.mu.Unlock()
+			timeoutFlush(10 * time.Second)
+			os.Exit(1)
+		}
+		// Dump all goroutine stacks before exiting.
+		// First, make sure we see the trace for the current goroutine on standard error.
+		// If -logtostderr has been specified, the loop below will do that anyway
+		// as the first stack in the full dump.
+		if !l.toStderr {
+			os.Stderr.Write(stacks(false))
+		}
+		// Write the stack trace for all goroutines to the files.
+		trace := stacks(true)
+		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
+		for log := fatalLog; log >= infoLog; log-- {
+			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
+				f.Write(trace)
+			}
+		}
+		l.mu.Unlock()
+		timeoutFlush(10 * time.Second)
+		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
+	}
+	l.putBuffer(buf)
+	l.mu.Unlock()
+	if stats := severityStats[s]; stats != nil {
+		atomic.AddInt64(&stats.lines, 1)
+		atomic.AddInt64(&stats.bytes, int64(len(data)))
+	}
+}
+
+// timeoutFlush calls Flush and returns when it completes or after timeout
+// elapses, whichever happens first.  This is needed because the hooks invoked
+// by Flush may deadlock when glog.Fatal is called from a hook that holds
+// a lock.
+func timeoutFlush(timeout time.Duration) {
+	done := make(chan bool, 1)
+	go func() {
+		Flush() // calls logging.lockAndFlushAll()
+		done <- true
+	}()
+	select {
+	case <-done:
+	case <-time.After(timeout):
+		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
+	}
+}
+
+// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
+func stacks(all bool) []byte {
+	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
+	n := 10000
+	if all {
+		n = 100000
+	}
+	var trace []byte
+	for i := 0; i < 5; i++ {
+		trace = make([]byte, n)
+		nbytes := runtime.Stack(trace, all)
+		if nbytes < len(trace) {
+			return trace[:nbytes]
+		}
+		n *= 2
+	}
+	return trace
+}
+
+// logExitFunc provides a simple mechanism to override the default behavior
+// of exiting on error. Used in testing and to guarantee we reach a required exit
+// for fatal logs. Alternatively, exit could be a function rather than a method, but that
+// would make its use clumsier.
+var logExitFunc func(error)
+
+// exit is called if there is trouble creating or writing log files.
+// It flushes the logs and exits the program; there's no point in hanging around.
+// l.mu is held.
+func (l *loggingT) exit(err error) {
+	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
+	// If logExitFunc is set, we do that instead of exiting.
+	if logExitFunc != nil {
+		logExitFunc(err)
+		return
+	}
+	l.flushAll()
+	os.Exit(2)
+}
+
+// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
+// file's Sync method and providing a wrapper for the Write method that provides log
+// file rotation. There are conflicting methods, so the file cannot be embedded.
+// l.mu is held for all its methods.
+type syncBuffer struct {
+	logger *loggingT
+	*bufio.Writer
+	file   *os.File
+	sev    severity
+	nbytes uint64 // The number of bytes written to this file
+}
+
+func (sb *syncBuffer) Sync() error {
+	return sb.file.Sync()
+}
+
+func (sb *syncBuffer) Write(p []byte) (n int, err error) {
+	if sb.nbytes+uint64(len(p)) >= MaxSize {
+		if err := sb.rotateFile(time.Now()); err != nil {
+			sb.logger.exit(err)
+		}
+	}
+	n, err = sb.Writer.Write(p)
+	sb.nbytes += uint64(n)
+	if err != nil {
+		sb.logger.exit(err)
+	}
+	return
+}
+
+// rotateFile closes the syncBuffer's file and starts a new one.
+func (sb *syncBuffer) rotateFile(now time.Time) error {
+	if sb.file != nil {
+		sb.Flush()
+		sb.file.Close()
+	}
+	var err error
+	sb.file, _, err = create(severityName[sb.sev], now)
+	sb.nbytes = 0
+	if err != nil {
+		return err
+	}
+
+	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
+
+	// Write header.
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
+	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
+	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
+	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
+	n, err := sb.file.Write(buf.Bytes())
+	sb.nbytes += uint64(n)
+	return err
+}
+
+// bufferSize sizes the buffer associated with each log file. It's large
+// so that log records can accumulate without the logging thread blocking
+// on disk I/O. The flushDaemon will block instead.
+const bufferSize = 256 * 1024
+
+// createFiles creates all the log files for severity from sev down to infoLog.
+// l.mu is held.
+func (l *loggingT) createFiles(sev severity) error {
+	now := time.Now()
+	// Files are created in decreasing severity order, so as soon as we find one
+	// has already been created, we can stop.
+	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
+		sb := &syncBuffer{
+			logger: l,
+			sev:    s,
+		}
+		if err := sb.rotateFile(now); err != nil {
+			return err
+		}
+		l.file[s] = sb
+	}
+	return nil
+}
+
+const flushInterval = 30 * time.Second
+
+// flushDaemon periodically flushes the log file buffers.
+func (l *loggingT) flushDaemon() {
+	for range time.NewTicker(flushInterval).C {
+		l.lockAndFlushAll()
+	}
+}
+
+// lockAndFlushAll is like flushAll but locks l.mu first.
+func (l *loggingT) lockAndFlushAll() {
+	l.mu.Lock()
+	l.flushAll()
+	l.mu.Unlock()
+}
+
+// flushAll flushes all the logs and attempts to "sync" their data to disk.
+// l.mu is held.
+func (l *loggingT) flushAll() {
+	// Flush from fatal down, in case there's trouble flushing.
+	for s := fatalLog; s >= infoLog; s-- {
+		file := l.file[s]
+		if file != nil {
+			file.Flush() // ignore error
+			file.Sync()  // ignore error
+		}
+	}
+}
+
+// CopyStandardLogTo arranges for messages written to the Go "log" package's
+// default logs to also appear in the Google logs for the named and lower
+// severities.  Subsequent changes to the standard log's default output location
+// or format may break this behavior.
+//
+// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
+// recognized, CopyStandardLogTo panics.
+func CopyStandardLogTo(name string) {
+	sev, ok := severityByName(name)
+	if !ok {
+		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+	}
+	// Set a log format that captures the user's file and line:
+	//   d.go:23: message
+	stdLog.SetFlags(stdLog.Lshortfile)
+	stdLog.SetOutput(logBridge(sev))
+}
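+
+// For example (an illustrative sketch; it assumes the standard "log" package
+// is used elsewhere in the program):
+//
+//	glog.CopyStandardLogTo("INFO")
+//	log.Print("this message also lands in glog's INFO log")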
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+	var (
+		file = "???"
+		line = 1
+		text string
+	)
+	// Split "d.go:23: message" into "d.go", "23", and "message".
+	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+		text = fmt.Sprintf("bad log format: %s", b)
+	} else {
+		file = string(parts[0])
+		text = string(parts[2][1:]) // skip leading space
+		line, err = strconv.Atoi(string(parts[1]))
+		if err != nil {
+			text = fmt.Sprintf("bad line number: %s", b)
+			line = 1
+		}
+	}
+	// printWithFileLine with alsoToStderr=true, so standard log messages
+	// always appear on standard error.
+	logging.printWithFileLine(severity(lb), file, line, true, text)
+	return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+	fn := runtime.FuncForPC(pc)
+	file, _ := fn.FileLine(pc)
+	// The file is something like /a/b/c/d.go. We want just the d.
+	if strings.HasSuffix(file, ".go") {
+		file = file[:len(file)-3]
+	}
+	if slash := strings.LastIndex(file, "/"); slash >= 0 {
+		file = file[slash+1:]
+	}
+	for _, filter := range l.vmodule.filter {
+		if filter.match(file) {
+			l.vmap[pc] = filter.level
+			return filter.level
+		}
+	}
+	l.vmap[pc] = 0
+	return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+//	if glog.V(2) { glog.Info("log this") }
+// or
+//	glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and --vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+	// This function tries hard to be cheap unless there's work to do.
+	// The fast path is two atomic loads and compares.
+
+	// Here is a cheap but safe test to see if V logging is enabled globally.
+	if logging.verbosity.get() >= level {
+		return Verbose(true)
+	}
+
+	// It's off globally but vmodule may still be set.
+	// Here is another cheap but safe test to see if vmodule is enabled.
+	if atomic.LoadInt32(&logging.filterLength) > 0 {
+		// Now we need a proper lock to use the logging structure. The pcs field
+		// is shared so we must lock before accessing it. This is fairly expensive,
+		// but if V logging is enabled we're slow anyway.
+		logging.mu.Lock()
+		defer logging.mu.Unlock()
+		if runtime.Callers(2, logging.pcs[:]) == 0 {
+			return Verbose(false)
+		}
+		v, ok := logging.vmap[logging.pcs[0]]
+		if !ok {
+			v = logging.setV(logging.pcs[0])
+		}
+		return Verbose(v >= level)
+	}
+	return Verbose(false)
+}
+
+// Info is equivalent to the global Info function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Info(args ...interface{}) {
+	if v {
+		logging.print(infoLog, args...)
+	}
+}
+
+// Infoln is equivalent to the global Infoln function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infoln(args ...interface{}) {
+	if v {
+		logging.println(infoLog, args...)
+	}
+}
+
+// Infof is equivalent to the global Infof function, guarded by the value of v.
+// See the documentation of V for usage.
+func (v Verbose) Infof(format string, args ...interface{}) {
+	if v {
+		logging.printf(infoLog, format, args...)
+	}
+}
+
+// Info logs to the INFO log.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Info(args ...interface{}) {
+	logging.print(infoLog, args...)
+}
+
+// InfoDepth acts as Info but uses depth to determine which call frame to log.
+// InfoDepth(0, "msg") is the same as Info("msg").
+func InfoDepth(depth int, args ...interface{}) {
+	logging.printDepth(infoLog, depth, args...)
+}
+
+// Infoln logs to the INFO log.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Infoln(args ...interface{}) {
+	logging.println(infoLog, args...)
+}
+
+// Infof logs to the INFO log.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Infof(format string, args ...interface{}) {
+	logging.printf(infoLog, format, args...)
+}
+
+// Warning logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Warning(args ...interface{}) {
+	logging.print(warningLog, args...)
+}
+
+// WarningDepth acts as Warning but uses depth to determine which call frame to log.
+// WarningDepth(0, "msg") is the same as Warning("msg").
+func WarningDepth(depth int, args ...interface{}) {
+	logging.printDepth(warningLog, depth, args...)
+}
+
+// Warningln logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Warningln(args ...interface{}) {
+	logging.println(warningLog, args...)
+}
+
+// Warningf logs to the WARNING and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Warningf(format string, args ...interface{}) {
+	logging.printf(warningLog, format, args...)
+}
+
+// Error logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Error(args ...interface{}) {
+	logging.print(errorLog, args...)
+}
+
+// ErrorDepth acts as Error but uses depth to determine which call frame to log.
+// ErrorDepth(0, "msg") is the same as Error("msg").
+func ErrorDepth(depth int, args ...interface{}) {
+	logging.printDepth(errorLog, depth, args...)
+}
+
+// Errorln logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Errorln(args ...interface{}) {
+	logging.println(errorLog, args...)
+}
+
+// Errorf logs to the ERROR, WARNING, and INFO logs.
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Errorf(format string, args ...interface{}) {
+	logging.printf(errorLog, format, args...)
+}
+
+// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Fatal(args ...interface{}) {
+	logging.print(fatalLog, args...)
+}
+
+// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
+// FatalDepth(0, "msg") is the same as Fatal("msg").
+func FatalDepth(depth int, args ...interface{}) {
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
+func Fatalln(args ...interface{}) {
+	logging.println(fatalLog, args...)
+}
+
+// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
+// including a stack trace of all running goroutines, then calls os.Exit(255).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Fatalf(format string, args ...interface{}) {
+	logging.printf(fatalLog, format, args...)
+}
+
+// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
+// It allows Exit and relatives to use the Fatal logs.
+var fatalNoStacks uint32
+
+// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
+func Exit(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.print(fatalLog, args...)
+}
+
+// ExitDepth acts as Exit but uses depth to determine which call frame to log.
+// ExitDepth(0, "msg") is the same as Exit("msg").
+func ExitDepth(depth int, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printDepth(fatalLog, depth, args...)
+}
+
+// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+func Exitln(args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.println(fatalLog, args...)
+}
+
+// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
+// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
+func Exitf(format string, args ...interface{}) {
+	atomic.StoreUint32(&fatalNoStacks, 1)
+	logging.printf(fatalLog, format, args...)
+}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
new file mode 100644
index 0000000..65075d2
--- /dev/null
+++ b/vendor/github.com/golang/glog/glog_file.go
@@ -0,0 +1,124 @@
+// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+//
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// File I/O for logs.
+
+package glog
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"os/user"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+)
+
+// MaxSize is the maximum size of a log file in bytes.
+var MaxSize uint64 = 1024 * 1024 * 1800
+
+// logDirs lists the candidate directories for new log files.
+var logDirs []string
+
+// If non-empty, overrides the choice of directory in which to write logs.
+// See createLogDirs for the full list of possible destinations.
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
+
+func createLogDirs() {
+	if *logDir != "" {
+		logDirs = append(logDirs, *logDir)
+	}
+	logDirs = append(logDirs, os.TempDir())
+}
+
+var (
+	pid      = os.Getpid()
+	program  = filepath.Base(os.Args[0])
+	host     = "unknownhost"
+	userName = "unknownuser"
+)
+
+func init() {
+	h, err := os.Hostname()
+	if err == nil {
+		host = shortHostname(h)
+	}
+
+	current, err := user.Current()
+	if err == nil {
+		userName = current.Username
+	}
+
+	// Sanitize userName since it may contain filepath separators on Windows.
+	userName = strings.Replace(userName, `\`, "_", -1)
+}
+
+// shortHostname returns its argument, truncating at the first period.
+// For instance, given "www.google.com" it returns "www".
+func shortHostname(hostname string) string {
+	if i := strings.Index(hostname, "."); i >= 0 {
+		return hostname[:i]
+	}
+	return hostname
+}
+
+// logName returns a new log file name containing tag, with start time t, and
+// the name for the symlink for tag.
+func logName(tag string, t time.Time) (name, link string) {
+	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
+		program,
+		host,
+		userName,
+		tag,
+		t.Year(),
+		t.Month(),
+		t.Day(),
+		t.Hour(),
+		t.Minute(),
+		t.Second(),
+		pid)
+	return name, program + "." + tag
+}
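+
+// As an illustration of the format above (all values hypothetical), a binary
+// named "voltctl" run by user "alice" on host "build01" with pid 1234 on
+// 2019-02-03 15:04:05 would yield a name such as
+//	voltctl.build01.alice.log.INFO.20190203-150405.1234
+// and the symlink name "voltctl.INFO".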
+
+var onceLogDirs sync.Once
+
+// create creates a new log file and returns the file and its filename, which
+// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
+// successfully, create also attempts to update the symlink for that tag, ignoring
+// errors.
+func create(tag string, t time.Time) (f *os.File, filename string, err error) {
+	onceLogDirs.Do(createLogDirs)
+	if len(logDirs) == 0 {
+		return nil, "", errors.New("log: no log dirs")
+	}
+	name, link := logName(tag, t)
+	var lastErr error
+	for _, dir := range logDirs {
+		fname := filepath.Join(dir, name)
+		f, err := os.Create(fname)
+		if err == nil {
+			symlink := filepath.Join(dir, link)
+			os.Remove(symlink)        // ignore err
+			os.Symlink(name, symlink) // ignore err
+			return f, fname, nil
+		}
+		lastErr = err
+	}
+	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..ada2b78
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1271 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
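+// A minimal usage sketch (illustrative only; "device" stands for any value of
+// a generated proto.Message type and is not defined in this package):
+//
+//	m := &jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}
+//	s, err := m.MarshalToString(device)
+//	if err != nil {
+//		// handle error
+//	}
+//	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+//	err = u.Unmarshal(strings.NewReader(s), device)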
+package jsonpb
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+	// Whether to render enum values as integers, as opposed to string values.
+	EnumsAsInts bool
+
+	// Whether to render fields with zero values.
+	EmitDefaults bool
+
+	// A string to indent each level by. The presence of this field will
+	// also cause a space to appear between the field separator and
+	// value, and for newlines to appear between fields and array
+	// elements.
+	Indent string
+
+	// Whether to use the original (.proto) name for fields.
+	OrigName bool
+
+	// A custom URL resolver to use when marshaling Any messages to JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+	// Only the part of typeUrl after the last slash is relevant.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
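+
+// For example, a message type could provide its own JSON form with a method
+// like the following (an illustrative sketch; MyMsg and its Name field are
+// hypothetical, not types from this package):
+//
+//	func (m *MyMsg) MarshalJSONPB(_ *jsonpb.Marshaler) ([]byte, error) {
+//		return json.Marshal(map[string]string{"name": m.Name})
+//	}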
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+	v := reflect.ValueOf(pb)
+	if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return errors.New("Marshal called with nil")
+	}
+	// Check for unset required fields first.
+	if err := checkRequiredFields(pb); err != nil {
+		return err
+	}
+	writer := &errWriter{writer: out}
+	return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+	var buf bytes.Buffer
+	if err := m.Marshal(&buf, pb); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extension ids to ensure stable output.
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+	if jsm, ok := v.(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(m)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if b, err = json.Marshal(js); err != nil {
+				return err
+			}
+		}
+
+		out.write(string(b))
+		return out.err
+	}
+
+	s := reflect.ValueOf(v).Elem()
+
+	// Handle well-known types.
+	if wkt, ok := v.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			// "Wrappers use the same representation in JSON
+			//  as the wrapped primitive type, ..."
+			sprop := proto.GetProperties(s.Type())
+			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+		case "Any":
+			// Any is a bit more involved.
+			return m.marshalAny(out, v, indent)
+		case "Duration":
+			// "Generated output always contains 0, 3, 6, or 9 fractional digits,
+			//  depending on required precision."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns <= -secondInNanos || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+			}
+			if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+				return errors.New("signs of seconds and nanos do not match")
+			}
+			if s < 0 {
+				ns = -ns
+			}
+			x := fmt.Sprintf("%d.%09d", s, ns)
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`s"`)
+			return out.err
+		case "Struct", "ListValue":
+			// Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+		case "Timestamp":
+			// "RFC 3339, where generated output will always be Z-normalized
+			//  and uses 0, 3, 6 or 9 fractional digits."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns < 0 || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+			}
+			t := time.Unix(s, ns).UTC()
+			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+			x := t.Format("2006-01-02T15:04:05.000000000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`Z"`)
+			return out.err
+		case "Value":
+			// Value has a single oneof.
+			kind := s.Field(0)
+			if kind.IsNil() {
+				// "absence of any variant indicates an error"
+				return errors.New("nil Value")
+			}
+			// oneof -> *T -> T -> T.F
+			x := kind.Elem().Elem().Field(0)
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, x, indent)
+		}
+	}
+
+	out.write("{")
+	if m.Indent != "" {
+		out.write("\n")
+	}
+
+	firstField := true
+
+	if typeURL != "" {
+		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < s.NumField(); i++ {
+		value := s.Field(i)
+		valueField := s.Type().Field(i)
+		if strings.HasPrefix(valueField.Name, "XXX_") {
+			continue
+		}
+
+		// IsNil will panic on most value kinds.
+		switch value.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface:
+			if value.IsNil() {
+				continue
+			}
+		}
+
+		if !m.EmitDefaults {
+			switch value.Kind() {
+			case reflect.Bool:
+				if !value.Bool() {
+					continue
+				}
+			case reflect.Int32, reflect.Int64:
+				if value.Int() == 0 {
+					continue
+				}
+			case reflect.Uint32, reflect.Uint64:
+				if value.Uint() == 0 {
+					continue
+				}
+			case reflect.Float32, reflect.Float64:
+				if value.Float() == 0 {
+					continue
+				}
+			case reflect.String:
+				if value.Len() == 0 {
+					continue
+				}
+			case reflect.Map, reflect.Ptr, reflect.Slice:
+				if value.IsNil() {
+					continue
+				}
+			}
+		}
+
+		// Oneof fields need special handling.
+		if valueField.Tag.Get("protobuf_oneof") != "" {
+			// value is an interface containing &T{real_value}.
+			sv := value.Elem().Elem() // interface -> *T -> T
+			value = sv.Field(0)
+			valueField = sv.Type().Field(0)
+		}
+		prop := jsonProperties(valueField, m.OrigName)
+		if !firstField {
+			m.writeSep(out)
+		}
+		if err := m.marshalField(out, prop, value, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if ep, ok := v.(proto.Message); ok {
+		extensions := proto.RegisteredExtensions(v)
+		// Sort extensions for stable output.
+		ids := make([]int32, 0, len(extensions))
+		for id, desc := range extensions {
+			if !proto.HasExtension(ep, desc) {
+				continue
+			}
+			ids = append(ids, id)
+		}
+		sort.Sort(int32Slice(ids))
+		for _, id := range ids {
+			desc := extensions[id]
+			if desc == nil {
+				// unknown extension
+				continue
+			}
+			ext, extErr := proto.GetExtension(ep, desc)
+			if extErr != nil {
+				return extErr
+			}
+			value := reflect.ValueOf(ext)
+			var prop proto.Properties
+			prop.Parse(desc.Tag)
+			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+			if !firstField {
+				m.writeSep(out)
+			}
+			if err := m.marshalField(out, &prop, value, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+
+	}
+
+	if m.Indent != "" {
+		out.write("\n")
+		out.write(indent)
+	}
+	out.write("}")
+	return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+	if m.Indent != "" {
+		out.write(",\n")
+	} else {
+		out.write(",")
+	}
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	v := reflect.ValueOf(any).Elem()
+	turl := v.Field(0).String()
+	val := v.Field(1).Bytes()
+
+	var msg proto.Message
+	var err error
+	if m.AnyResolver != nil {
+		msg, err = m.AnyResolver.Resolve(turl)
+	} else {
+		msg, err = defaultResolveAny(turl)
+	}
+	if err != nil {
+		return err
+	}
+
+	if err := proto.Unmarshal(val, msg); err != nil {
+		return err
+	}
+
+	if _, ok := msg.(wkt); ok {
+		out.write("{")
+		if m.Indent != "" {
+			out.write("\n")
+		}
+		if err := m.marshalTypeURL(out, indent, turl); err != nil {
+			return err
+		}
+		m.writeSep(out)
+		if m.Indent != "" {
+			out.write(indent)
+			out.write(m.Indent)
+			out.write(`"value": `)
+		} else {
+			out.write(`"value":`)
+		}
+		if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+			return err
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+		}
+		out.write("}")
+		return out.err
+	}
+
+	return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"@type":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	out.write(string(b))
+	return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"`)
+	out.write(prop.JSONName)
+	out.write(`":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	if err := m.marshalValue(out, prop, v, indent); err != nil {
+		return err
+	}
+	return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	var err error
+	v = reflect.Indirect(v)
+
+	// Handle nil pointer
+	if v.Kind() == reflect.Invalid {
+		out.write("null")
+		return out.err
+	}
+
+	// Handle repeated elements.
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		out.write("[")
+		comma := ""
+		for i := 0; i < v.Len(); i++ {
+			sliceVal := v.Index(i)
+			out.write(comma)
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write("]")
+		return out.err
+	}
+
+	// Handle well-known types.
+	// Most are handled up in marshalObject (because 99% are messages).
+	if wkt, ok := v.Interface().(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "NullValue":
+			out.write("null")
+			return out.err
+		}
+	}
+
+	// Handle enumerations.
+	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
+		// value. Such values should _not_ be quoted or they will be interpreted
+		// as an enum string instead of their value.
+		enumStr := v.Interface().(fmt.Stringer).String()
+		var valStr string
+		if v.Kind() == reflect.Ptr {
+			valStr = strconv.Itoa(int(v.Elem().Int()))
+		} else {
+			valStr = strconv.Itoa(int(v.Int()))
+		}
+		isKnownEnum := enumStr != valStr
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		out.write(enumStr)
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		return out.err
+	}
+
+	// Handle nested messages.
+	if v.Kind() == reflect.Struct {
+		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+	}
+
+	// Handle maps.
+	// Since Go randomizes map iteration, we sort keys for stable output.
+	if v.Kind() == reflect.Map {
+		out.write(`{`)
+		keys := v.MapKeys()
+		sort.Sort(mapKeys(keys))
+		for i, k := range keys {
+			if i > 0 {
+				out.write(`,`)
+			}
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+
+			// TODO handle map key prop properly
+			b, err := json.Marshal(k.Interface())
+			if err != nil {
+				return err
+			}
+			s := string(b)
+
+			// If the JSON is not a string value, encode it again to make it one.
+			if !strings.HasPrefix(s, `"`) {
+				b, err := json.Marshal(s)
+				if err != nil {
+					return err
+				}
+				s = string(b)
+			}
+
+			out.write(s)
+			out.write(`:`)
+			if m.Indent != "" {
+				out.write(` `)
+			}
+
+			vprop := prop
+			if prop != nil && prop.MapValProp != nil {
+				vprop = prop.MapValProp
+			}
+			if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+				return err
+			}
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write(`}`)
+		return out.err
+	}
+
+	// Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		f := v.Float()
+		var sval string
+		switch {
+		case math.IsInf(f, 1):
+			sval = `"Infinity"`
+		case math.IsInf(f, -1):
+			sval = `"-Infinity"`
+		case math.IsNaN(f):
+			sval = `"NaN"`
+		}
+		if sval != "" {
+			out.write(sval)
+			return out.err
+		}
+	}
+
+	// Default handling defers to the encoding/json library.
+	b, err := json.Marshal(v.Interface())
+	if err != nil {
+		return err
+	}
+	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+	if needToQuote {
+		out.write(`"`)
+	}
+	out.write(string(b))
+	if needToQuote {
+		out.write(`"`)
+	}
+	return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// Whether to allow messages to contain unknown fields, as opposed to
+	// failing to unmarshal.
+	AllowUnknownFields bool
+
+	// A custom URL resolver to use when unmarshaling Any messages from JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of the related
+// Marshaler's options.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	inputValue := json.RawMessage{}
+	if err := dec.Decode(&inputValue); err != nil {
+		return err
+	}
+	if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+		return err
+	}
+	return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of the
+// related Marshaler's options.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+	dec := json.NewDecoder(r)
+	return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode any permutation of the related
+// Marshaler's options.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode any permutation of the
+// related Marshaler's options.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode any permutation
+// of the related Marshaler's options.
+func UnmarshalString(str string, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
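+
+// A minimal usage sketch, assuming a hypothetical generated message type
+// pb.Example with a string field "name":
+//
+//	var msg pb.Example
+//	if err := UnmarshalString(`{"name": "test"}`, &msg); err != nil {
+//		// handle malformed JSON, mismatched field types, or unknown fields
+//	}
+//
+// To tolerate unknown fields instead of failing, configure an Unmarshaler:
+//
+//	u := &Unmarshaler{AllowUnknownFields: true}
+//	err := u.Unmarshal(r, &msg) // r is an io.Reader producing the JSON input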
+
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+	targetType := target.Type()
+
+	// Allocate memory for pointer fields.
+	if targetType.Kind() == reflect.Ptr {
+		// If input value is "null" and target is a pointer type, then the field should be treated as not set
+		// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+		_, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+		if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+			return nil
+		}
+		target.Set(reflect.New(targetType.Elem()))
+
+		return u.unmarshalValue(target.Elem(), inputValue, prop)
+	}
+
+	if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+	}
+
+	// Handle well-known types that are not pointers.
+	if w, ok := target.Addr().Interface().(wkt); ok {
+		switch w.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			return u.unmarshalValue(target.Field(0), inputValue, prop)
+		case "Any":
+			// Use the json.RawMessage pointer type instead of the value type to support
+			// pre-1.8 versions of Go. Go 1.8 changed RawMessage.MarshalJSON from a pointer
+			// type to a value type, see https://github.com/golang/go/issues/14493.
+			var jsonFields map[string]*json.RawMessage
+			if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+				return err
+			}
+
+			val, ok := jsonFields["@type"]
+			if !ok || val == nil {
+				return errors.New("Any JSON doesn't have '@type'")
+			}
+
+			var turl string
+			if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+				return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+			}
+			target.Field(0).SetString(turl)
+
+			var m proto.Message
+			var err error
+			if u.AnyResolver != nil {
+				m, err = u.AnyResolver.Resolve(turl)
+			} else {
+				m, err = defaultResolveAny(turl)
+			}
+			if err != nil {
+				return err
+			}
+
+			if _, ok := m.(wkt); ok {
+				val, ok := jsonFields["value"]
+				if !ok {
+					return errors.New("Any JSON doesn't have 'value'")
+				}
+
+				if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			} else {
+				delete(jsonFields, "@type")
+				nestedProto, err := json.Marshal(jsonFields)
+				if err != nil {
+					return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+				}
+
+				if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			}
+
+			b, err := proto.Marshal(m)
+			if err != nil {
+				return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+			}
+			target.Field(1).SetBytes(b)
+
+			return nil
+		case "Duration":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			d, err := time.ParseDuration(unq)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			target.Field(0).SetInt(s)
+			target.Field(1).SetInt(ns)
+			return nil
+		case "Timestamp":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, unq)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+
+			target.Field(0).SetInt(t.Unix())
+			target.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Struct":
+			var m map[string]json.RawMessage
+			if err := json.Unmarshal(inputValue, &m); err != nil {
+				return fmt.Errorf("bad StructValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+			for k, jv := range m {
+				pv := &stpb.Value{}
+				if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+					return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+				}
+				target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+			}
+			return nil
+		case "ListValue":
+			var s []json.RawMessage
+			if err := json.Unmarshal(inputValue, &s); err != nil {
+				return fmt.Errorf("bad ListValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+			for i, sv := range s {
+				if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+					return err
+				}
+			}
+			return nil
+		case "Value":
+			ivStr := string(inputValue)
+			if ivStr == "null" {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+			} else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+			} else if v, err := unquote(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+			} else if v, err := strconv.ParseBool(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+			} else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+				lv := &stpb.ListValue{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+				return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+			} else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+				sv := &stpb.Struct{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+				return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+			} else {
+				return fmt.Errorf("unrecognized type for Value %q", ivStr)
+			}
+			return nil
+		}
+	}
+
+	// Handle enums, which have an underlying type of int32,
+	// and may appear as strings.
+	// The case of an enum appearing as a number is handled
+	// at the bottom of this function.
+	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+		vmap := proto.EnumValueMap(prop.Enum)
+		// Don't need to do unquoting; valid enum names
+		// are from a limited character set.
+		s := inputValue[1 : len(inputValue)-1]
+		n, ok := vmap[string(s)]
+		if !ok {
+			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+		}
+		if target.Kind() == reflect.Ptr { // proto2
+			target.Set(reflect.New(targetType.Elem()))
+			target = target.Elem()
+		}
+		if targetType.Kind() != reflect.Int32 {
+			return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+		}
+		target.SetInt(int64(n))
+		return nil
+	}
+
+	// Handle nested messages.
+	if targetType.Kind() == reflect.Struct {
+		var jsonFields map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+			return err
+		}
+
+		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+			// Be liberal in what names we accept; both orig_name and camelName are okay.
+			fieldNames := acceptedJSONFieldNames(prop)
+
+			vOrig, okOrig := jsonFields[fieldNames.orig]
+			vCamel, okCamel := jsonFields[fieldNames.camel]
+			if !okOrig && !okCamel {
+				return nil, false
+			}
+			// If, for some reason, both are present in the data, favour the camelName.
+			var raw json.RawMessage
+			if okOrig {
+				raw = vOrig
+				delete(jsonFields, fieldNames.orig)
+			}
+			if okCamel {
+				raw = vCamel
+				delete(jsonFields, fieldNames.camel)
+			}
+			return raw, true
+		}
+
+		sprops := proto.GetProperties(targetType)
+		for i := 0; i < target.NumField(); i++ {
+			ft := target.Type().Field(i)
+			if strings.HasPrefix(ft.Name, "XXX_") {
+				continue
+			}
+
+			valueForField, ok := consumeField(sprops.Prop[i])
+			if !ok {
+				continue
+			}
+
+			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+				return err
+			}
+		}
+		// Check for any oneof fields.
+		if len(jsonFields) > 0 {
+			for _, oop := range sprops.OneofTypes {
+				raw, ok := consumeField(oop.Prop)
+				if !ok {
+					continue
+				}
+				nv := reflect.New(oop.Type.Elem())
+				target.Field(oop.Field).Set(nv)
+				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+					return err
+				}
+			}
+		}
+		// Handle proto2 extensions.
+		if len(jsonFields) > 0 {
+			if ep, ok := target.Addr().Interface().(proto.Message); ok {
+				for _, ext := range proto.RegisteredExtensions(ep) {
+					name := fmt.Sprintf("[%s]", ext.Name)
+					raw, ok := jsonFields[name]
+					if !ok {
+						continue
+					}
+					delete(jsonFields, name)
+					nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+					if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+						return err
+					}
+					if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		if !u.AllowUnknownFields && len(jsonFields) > 0 {
+			// Pick any field to be the scapegoat.
+			var f string
+			for fname := range jsonFields {
+				f = fname
+				break
+			}
+			return fmt.Errorf("unknown field %q in %v", f, targetType)
+		}
+		return nil
+	}
+
+	// Handle arrays (which aren't encoded bytes)
+	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+		var slc []json.RawMessage
+		if err := json.Unmarshal(inputValue, &slc); err != nil {
+			return err
+		}
+		if slc != nil {
+			l := len(slc)
+			target.Set(reflect.MakeSlice(targetType, l, l))
+			for i := 0; i < l; i++ {
+				if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	// Handle maps (whose keys are always strings)
+	if targetType.Kind() == reflect.Map {
+		var mp map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &mp); err != nil {
+			return err
+		}
+		if mp != nil {
+			target.Set(reflect.MakeMap(targetType))
+			for ks, raw := range mp {
+				// Unmarshal map key. The core json library already decoded the key into a
+				// string, so we handle that specially. Other types were quoted post-serialization.
+				var k reflect.Value
+				if targetType.Key().Kind() == reflect.String {
+					k = reflect.ValueOf(ks)
+				} else {
+					k = reflect.New(targetType.Key()).Elem()
+					var kprop *proto.Properties
+					if prop != nil && prop.MapKeyProp != nil {
+						kprop = prop.MapKeyProp
+					}
+					if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+						return err
+					}
+				}
+
+				// Unmarshal map value.
+				v := reflect.New(targetType.Elem()).Elem()
+				var vprop *proto.Properties
+				if prop != nil && prop.MapValProp != nil {
+					vprop = prop.MapValProp
+				}
+				if err := u.unmarshalValue(v, raw, vprop); err != nil {
+					return err
+				}
+				target.SetMapIndex(k, v)
+			}
+		}
+		return nil
+	}
+
+	// Non-finite numbers can be encoded as strings.
+	isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isFloat {
+		if num, ok := nonFinite[string(inputValue)]; ok {
+			target.SetFloat(num)
+			return nil
+		}
+	}
+
+	// Integers and floats can also be encoded as strings. In this case we drop
+	// the quotes and proceed as normal.
+	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+		targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+		targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isNum && strings.HasPrefix(string(inputValue), `"`) {
+		inputValue = inputValue[1 : len(inputValue)-1]
+	}
+
+	// Use the encoding/json for parsing other value types.
+	return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+func unquote(s string) (string, error) {
+	var ret string
+	err := json.Unmarshal([]byte(s), &ret)
+	return ret, err
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+	var prop proto.Properties
+	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+	if origName || prop.JSONName == "" {
+		prop.JSONName = prop.OrigName
+	}
+	return &prop
+}
+
+type fieldNames struct {
+	orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+	if prop.JSONName != "" {
+		opts.camel = prop.JSONName
+	}
+	return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+	writer io.Writer
+	err    error
+}
+
+func (w *errWriter) write(str string) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write([]byte(str))
+}
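+
+// The marshaling methods above use this pattern: issue a sequence of writes
+// and check err once at the end, so individual writes need no error handling.
+// A minimal sketch, where w is any io.Writer:
+//
+//	out := &errWriter{writer: w}
+//	out.write(`"@type":`)
+//	out.write(` `)
+//	out.write(`"value"`)
+//	return out.err // the first write error, if any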
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	if k := s[i].Kind(); k == s[j].Kind() {
+		switch k {
+		case reflect.String:
+			return s[i].String() < s[j].String()
+		case reflect.Int32, reflect.Int64:
+			return s[i].Int() < s[j].Int()
+		case reflect.Uint32, reflect.Uint64:
+			return s[i].Uint() < s[j].Uint()
+		}
+	}
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal.  While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+	// Most well-known type messages do not contain required fields.  The "Any" type may contain
+	// a message that has required fields.
+	//
+	// When an Any message is being marshaled, the code will have invoked proto.Unmarshal on the
+	// Any.Value field in order to transform it into JSON, and that should have returned an error
+	// if a required field is not set in the embedded message.
+	//
+	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+	// embedded message to store the serialized message in the Any.Value field, and that should
+	// have returned an error if a required field is not set.
+	if _, ok := pb.(wkt); ok {
+		return nil
+	}
+
+	v := reflect.ValueOf(pb)
+	// Skip message if it is not a struct pointer.
+	if v.Kind() != reflect.Ptr {
+		return nil
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		sfield := v.Type().Field(i)
+
+		if sfield.PkgPath != "" {
+			// blank PkgPath means the field is exported; skip if not exported
+			continue
+		}
+
+		if strings.HasPrefix(sfield.Name, "XXX_") {
+			continue
+		}
+
+		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
+		// field, i.e. an interface containing &T{real_value}.
+		if sfield.Tag.Get("protobuf_oneof") != "" {
+			if field.Kind() != reflect.Interface {
+				continue
+			}
+			v := field.Elem()
+			if v.Kind() != reflect.Ptr || v.IsNil() {
+				continue
+			}
+			v = v.Elem()
+			if v.Kind() != reflect.Struct || v.NumField() < 1 {
+				continue
+			}
+			field = v.Field(0)
+			sfield = v.Type().Field(0)
+		}
+
+		protoTag := sfield.Tag.Get("protobuf")
+		if protoTag == "" {
+			continue
+		}
+		var prop proto.Properties
+		prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+		switch field.Kind() {
+		case reflect.Map:
+			if field.IsNil() {
+				continue
+			}
+			// Check each map value.
+			keys := field.MapKeys()
+			for _, k := range keys {
+				v := field.MapIndex(k)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Slice:
+			// Handle non-repeated type, e.g. bytes.
+			if !prop.Repeated {
+				if prop.Required && field.IsNil() {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+
+			// Handle repeated type.
+			if field.IsNil() {
+				continue
+			}
+			// Check each slice item.
+			for i := 0; i < field.Len(); i++ {
+				v := field.Index(i)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Ptr:
+			if field.IsNil() {
+				if prop.Required {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+			if err := checkRequiredFieldsInValue(field); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Handle proto2 extensions.
+	for _, ext := range proto.RegisteredExtensions(pb) {
+		if !proto.HasExtension(pb, ext) {
+			continue
+		}
+		ep, err := proto.GetExtension(pb, ext)
+		if err != nil {
+			return err
+		}
+		err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+	if pm, ok := v.Interface().(proto.Message); ok {
+		return checkRequiredFields(pm)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
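+
+// A minimal usage sketch, assuming a hypothetical generated message type
+// *pb.Example:
+//
+//	dst := Clone(src).(*pb.Example) // deep copy of src
+//	Merge(dst, patch)               // fields set in patch overwrite dst; repeated fields append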
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate the destination if it is not set or is set to a different type.
+		// Otherwise we merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
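+
+// For example, the two bytes {0xAC, 0x02} decode to 300:
+//
+//	x, n := DecodeVarint([]byte{0xAC, 0x02})
+//	// x == 300, n == 2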
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
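+
+// Fixed-width values are read little-endian. For example:
+//
+//	p := NewBuffer([]byte{0x78, 0x56, 0x34, 0x12})
+//	x, _ := p.DecodeFixed32() // x == 0x12345678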
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
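+
+// Zigzag encoding maps signed integers of small magnitude to small unsigned
+// varints: 0, -1, 1, -2, 2, ... encode to 0, 1, 2, 3, 4, ... So a decoded
+// varint of 3 yields -2, and 4 yields 2, once the result is reinterpreted as
+// a signed integer.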
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
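+
+// A minimal sketch of the difference, assuming a hypothetical generated
+// message type *pb.Example with a field A already set on msg:
+//
+//	_ = Unmarshal(buf, msg)      // msg is Reset first; only fields present in buf survive
+//	_ = UnmarshalMerge(buf, msg) // msg.A is preserved and the fields in buf are merged on top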
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to produce a message that continues to carry those unrecognized
+// fields. To avoid this, call DiscardUnknown to explicitly clear the unknown
+// fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
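+
+// A minimal sketch, assuming msg was just unmarshaled from wire data that
+// contained fields the generated type does not know about:
+//
+//	_ = Unmarshal(buf, msg) // unknown fields are retained inside msg
+//	DiscardUnknown(msg)     // drop them so a later Marshal omits them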
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							di.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						di.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
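
The wire-format primitives above can be exercised on their own. A small self-contained sketch (not part of the vendored file) showing the varint, zigzag, and length-delimited encoders, and checking that the standalone EncodeVarint helper agrees with the Buffer method:

	package main

	import (
		"bytes"
		"fmt"

		"github.com/golang/protobuf/proto"
	)

	func main() {
		b := proto.NewBuffer(nil)
		_ = b.EncodeVarint(300)            // 300 -> 0xAC 0x02 on the wire
		_ = b.EncodeZigzag64(^uint64(0))   // -1 as sint64 zigzag-encodes to varint 1
		_ = b.EncodeRawBytes([]byte("hi")) // length-prefixed bytes

		fmt.Println(proto.SizeVarint(300))                                    // 2
		fmt.Println(bytes.Equal(proto.EncodeVarint(300), []byte{0xac, 0x02})) // true
		fmt.Printf("% x\n", b.Bytes())                                        // ac 02 01 02 68 69
	}
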
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f9b6e41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1 := extensionAsLegacyType(e1.value)
+		m2 := extensionAsLegacyType(e2.value)
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only the encoded form and the bytes are the same,
+			// that case is handled above. We only get here when the bytes differ
+			// and we have no descriptor to decode them with, so report the
+			// extensions as not equal.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
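
A short sketch of the equality rules documented above, assuming a package pb generated from the test.proto shown later in lib.go's package documentation (the import path is illustrative only):

	a := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	b := &pb.Test{Label: proto.String("hello"), Reps: []int64{1, 2, 3}}
	fmt.Println(proto.Equal(a, b)) // true: same type, corresponding fields equal

	b.Type = proto.Int32(77)       // explicitly set, even though 77 is also the declared default
	fmt.Println(proto.Equal(a, b)) // false: set/unset mismatch on an optional scalar field

	var c *pb.Test
	fmt.Println(proto.Equal(a, c)) // false: a non-nil and a nil message of the same type
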
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..fa88add
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc *ExtensionDesc
+
+	// value is a concrete value for the extension field. Let the type of
+	// desc.ExtensionType be the "API type" and the type of Extension.value
+	// be the "storage type". The API type and storage type are the same except:
+	//	* For scalars (except []byte), the API type uses *T,
+	//	while the storage type uses T.
+	//	* For repeated fields, the API type uses []T, while the storage type
+	//	uses *[]T.
+	//
+	// The reason for the divergence is that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
+	//
+	// The value may only be populated if desc is also populated.
+	value interface{}
+
+	// enc is the raw bytes for the extension field.
+	enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return extensionAsLegacyType(e.value), nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = extensionAsStorageType(v)
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return extensionAsLegacyType(e.value), nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default is defined for the extension, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a pointer; we can return sf.value directly.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it through SetInt, which takes an int64.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+	return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
+
+// extensionAsLegacyType converts a value in the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		// Represent primitive types as a pointer to the value.
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Slice:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	}
+	return v
+}
+
+// extensionAsStorageType converts a value in the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Ptr:
+		// Represent pointers to primitive types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	case reflect.Slice:
+		// Represent slice types as a pointer to the value.
+		if rv.Type().Elem().Kind() != reflect.Uint8 {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			v = rv2.Interface()
+		}
+	}
+	return v
+}
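
A usage sketch for the extension API above. It assumes a proto2-generated package pb declaring an extendable message pb.Base and a registered optional string extension pb.E_Note; those identifiers and the import path are hypothetical and only illustrate the call pattern:

	msg := &pb.Base{}
	if err := proto.SetExtension(msg, pb.E_Note, proto.String("hello")); err != nil {
		log.Fatal(err) // e.g. wrong value type, or field number outside the declared ranges
	}
	if proto.HasExtension(msg, pb.E_Note) {
		v, err := proto.GetExtension(msg, pb.E_Note) // API type for a scalar extension is *T
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(*v.(*string)) // "hello"
	}
	proto.ClearExtension(msg, pb.E_Note)
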
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return fmt.Sprintf("proto: required field not set")
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it was successful.
+// It returns false for any fatal, non-nil error and true otherwise.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
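
A sketch of requesting deterministic output through a Buffer. Buffer.Marshal is defined elsewhere in this package; msg stands for any generated proto.Message and is assumed here:

	var buf proto.Buffer
	buf.SetDeterministic(true) // map entries are emitted in sorted key order
	if err := buf.Marshal(msg); err != nil {
		log.Fatal(err)
	}
	out := buf.Bytes() // equal messages produce identical bytes within this binary
	_ = out
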
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
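
Both accepted JSON representations, using the FOO_value map from the generated example in the package documentation above (a sketch; the pb import path is illustrative, and generated UnmarshalJSON methods normally call this on your behalf):

	v1, err1 := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "example.FOO") // symbolic form
	v2, err2 := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`17`), "example.FOO")  // numeric form
	fmt.Println(v1, v2, err1, err2)                                                 // 17 17 <nil> <nil>
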
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
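
For the pb.Test message from the package documentation (optional int32 type = 2 [default=77]), SetDefaults only fills fields that are both unset and have a declared default. A sketch, again assuming the generated pb package:

	m := &pb.Test{Label: proto.String("x")}
	proto.SetDefaults(m)
	fmt.Println(*m.Type)      // 77 (Default_Test_Type): Type was unset and has a default
	fmt.Println(m.GetLabel()) // "x": already set, left untouched
	fmt.Println(m.Reps)       // []: repeated fields have no defaults and stay nil
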
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage, which
+	// records the scalar fields (together with any proto-declared default values)
+	// and the indices of the nested message fields.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
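+
+// A minimal usage sketch: the keys of a reflected map are sorted in place
+// using the comparator chosen above, so callers can visit them in a stable
+// order.
+//
+//	keys := reflect.ValueOf(map[int32]string{3: "c", 1: "a"}).MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	// keys now iterate in the order 1, 3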
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
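+
+// Quick illustration (hand-picked values, not exhaustive):
+//
+//	isProto3Zero(reflect.ValueOf(""))       // true: empty string
+//	isProto3Zero(reflect.ValueOf(int32(7))) // false: non-zero scalar
+//	isProto3Zero(reflect.ValueOf(false))    // true: false is the zero bool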
+
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
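+//
+// As a rough sketch (tag bytes computed from the field numbers above), one
+// item with type_id 12345 wrapping a 5-byte payload is encoded as:
+//
+//	0x0b                          // field 1, start group
+//	0x10 0xb9 0x60                // field 2 (type_id), varint 12345
+//	0x1a 0x05 <5 payload bytes>   // field 3 (message), length-delimited
+//	0x0c                          // field 1, end group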
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
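+
+// For example (illustrative bytes): skipVarint([]byte{0x96, 0x01, 0x0a})
+// skips the two-byte varint 0x96 0x01 (150) and returns []byte{0x0a}.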
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
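+
+// A small sketch of the representation: for the second field of a struct,
+// toField yields the index path []int{1}, which FieldByIndex later follows.
+//
+//	sf := reflect.TypeOf(struct{ A, B int32 }{}).Field(1)
+//	toField(&sf) // field([]int{1})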
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
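+
+// A small sketch of the representation: here a field is simply a byte offset
+// from the start of the struct, so for the second of two int64 fields toField
+// yields 8.
+//
+//	sf := reflect.TypeOf(struct{ A, B int64 }{}).Field(1)
+//	toField(&sf) // field(8)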
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid; however, calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..79668ff
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,545 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
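+
+// A minimal sketch of the two-tier lookup: small tag numbers live in the
+// slice, large ones spill into the map.
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // stored in fastTags
+//	tm.put(5000, 1)     // stored in slowTags
+//	fi, ok := tm.get(3) // fi == 0, ok == true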
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		}
+	}
+}
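+
+// For example, parsing a typical generated struct tag (the field name here is
+// made up for illustration) fills in the wire type, tag number and names:
+//
+//	var p Properties
+//	p.Parse("bytes,12,opt,name=site_id,json=siteId")
+//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 12,
+//	// p.Optional == true, p.OrigName == "site_id", p.JSONName == "siteId"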
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	switch t1 := typ; t1.Kind() {
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+
+	case reflect.Slice:
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+			p.stype = t2.Elem()
+		}
+
+	case reflect.Map:
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
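+
+// Callers pass the struct type itself, not a pointer to it. A hedged sketch,
+// where pb.Device stands in for any generated message type:
+//
+//	sprops := proto.GetProperties(reflect.TypeOf(pb.Device{}))
+//	for _, prop := range sprops.Prop {
+//		fmt.Println(prop.OrigName, prop.Tag)
+//	}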
+
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
+	}
+	if len(oots) > 0 {
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
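+
+// Generated code typically registers an enum roughly like the following
+// (the "example.Color" name and its values are assumptions for illustration):
+//
+//	proto.RegisterEnum("example.Color", map[int32]string{0: "RED", 1: "BLUE"},
+//		map[string]int32{"RED": 0, "BLUE": 1})
+//	proto.EnumValueMap("example.Color")["BLUE"] // 1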
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns is not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should only be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
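+
+// Generated code usually reaches this through a package-level
+// InternalMessageInfo value; sketched here for an assumed message type Foo:
+//
+//	var xxx_messageInfo_Foo proto.InternalMessageInfo
+//
+//	func (m *Foo) XXX_Size() int {
+//		return xxx_messageInfo_Foo.Size(m)
+//	}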
+
+// Marshal is the entry point from generated code,
+// and should only be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message is not generated),
+// it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, returning the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at the beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.sizecache = invalidField
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			u.v1extensions = toField(&f)
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
+	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizer,
+		marshaler: marshaler,
+		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// the tag has the format "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizer,
+			marshaler: marshaler,
+		}
+	}
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
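+
+// Illustrative examples for wiretype (assuming the standard protobuf
+// wire-type values, where WireVarint=0, WireBytes=2 and WireFixed32=5):
+//   wiretype("varint")  == WireVarint  (0)
+//   wiretype("bytes")   == WireBytes   (2)
+//   wiretype("fixed32") == WireFixed32 (5)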
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
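+
+// Worked example for setTag (following the standard protobuf key encoding,
+// key = tag<<3 | wiretype): for a hypothetical field with tag 16 and
+// wt == WireBytes (2), fi.wiretag is 16<<3|2 == 130 and fi.tagsize is
+// SizeVarint(16<<3) == 2, because 128 needs two varint bytes on the wire.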
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// Oneof bytes field may also have "proto3" tag.
+			// We want to marshal it as a oneof field. Do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if slice {
+				return makeMessageSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeMessageMarshaler(getMarshalInfo(t))
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
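+
+// For instance (a hypothetical proto3 field, not taken from any generated
+// message): a plain int32 field with tags ["varint", "3", "opt", ...] and
+// nozero=true falls through the reflect.Int32/"varint" branch and returns
+// (sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero); the same field
+// declared as *int32 would instead select the Ptr variants.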
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
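+
+// The zigzag sizers above use the standard protobuf zigzag mapping,
+// (n<<1)^(n>>31) (or >>63 for 64-bit), which interleaves signed values so
+// that small magnitudes stay small varints; for example 0 maps to 0,
+// -1 maps to 1, 1 maps to 2 and -2 maps to 3.
+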
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
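+
+// Illustrative example: fixed32 values are written least-significant byte
+// first, so appendFixed32(nil, 0x01020304) yields
+// []byte{0x04, 0x03, 0x02, 0x01}; appendFixed64 below does the same over
+// eight bytes.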
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make the 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
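+
+// Illustrative example (standard base-128 varint encoding, low seven bits
+// first, with the continuation bit set on all but the last byte):
+// appendVarint(nil, 300) yields []byte{0xAC, 0x02}, because
+// 300 = 0b10_0101100, whose low seven bits are 0x2C (|0x80 -> 0xAC) and
+// whose remaining bits are 0x02.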
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
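+
+// Worked example for the packed encoding above (hypothetical field number 4,
+// values {3, 270}): the key is rewritten to wire type 2, giving
+// (4<<3)|2 = 0x22, the payload varints are 0x03 and 0x8E 0x02 (three bytes
+// total), so the appended bytes are 0x22 0x03 0x03 0x8E 0x02.
+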
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
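+
+// Note on the group framing above: WireEndGroup == WireStartGroup+1, so
+// adding (WireEndGroup-WireStartGroup) to the start-group key produces the
+// matching end-group key. For a hypothetical group field 5, the start key is
+// (5<<3)|3 = 0x2B and the end key is (5<<3)|4 = 0x2C.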
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
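+
+// Illustrative layout of an embedded message produced by the marshaler
+// above (hypothetical field number 6): key (6<<3)|2 = 0x32, then a varint
+// length taken from u.cachedsize, then the submessage bytes. Using the
+// cached size avoids re-walking nested messages that size() just visited.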
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface: the
+	// idata itself is the value. Otherwise, the idata is a pointer to the
+	// value.
+	// The key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic, so we use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size cache,
+	// but it cannot be nested either, so valSizer is used directly.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
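+
+// Illustrative sketch of the entry layout produced above: each map entry is
+// encoded as an embedded message whose key is field 1 and whose value is
+// field 2; the deterministic flag only affects the order in which entries are
+// emitted. A hypothetical map<string, int32> field with field number 4
+// holding {"a": 1} appears on the wire as
+//
+//   0x22 0x05        // field 4, WireBytes, entry length 5
+//   0x0a 0x01 0x61   // key:   field 1, string "a"
+//   0x10 0x01        // value: field 2, varint 1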
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// Oneof field is an interface. We need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
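+
+// Illustrative note: the oneof container field has no wiretag of its own; the
+// tag comes from whichever wrapper struct is currently stored in the
+// interface. With a hypothetical wrapper Msg_Name (a struct with a single
+// Name field) selected, the marshaler above looks up
+// fi.oneofElems[reflect.TypeOf(Msg_Name{})] and emits that element's wiretag
+// followed by the wrapped value.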
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
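+//
+// Illustrative sketch: a single Item with type_id 12345 and a 3-byte message
+// payload is framed as
+//
+//   0x0b            // Item: field 1, start group
+//   0x10 0xb9 0x60  // type_id: field 2, varint 12345
+//   0x1a 0x03 ...   // message: field 3, length 3, then the payload bytes
+//   0x0c            // Item: field 1, end group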
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
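+
+// Illustrative usage sketch; pb.Example stands in for a hypothetical
+// generated message type, it is not part of this package:
+//
+//   msg := &pb.Example{Name: "abc"}
+//   data, err := proto.Marshal(msg)
+//   if err != nil {
+//       // handle the error
+//   }
+//   // For an unchanged message, len(data) equals proto.Size(msg).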
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, err := m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
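+
+// Illustrative usage sketch: the Buffer entry point lets a caller reuse one
+// backing slice across messages and opt into deterministic (sorted-map)
+// output via the buffer's deterministic flag, set through SetDeterministic
+// (defined elsewhere in this package). pb.Example is again a hypothetical
+// generated message type.
+//
+//   buf := proto.NewBuffer(nil)
+//   buf.SetDeterministic(true)
+//   if err := buf.Marshal(&pb.Example{Name: "abc"}); err != nil {
+//       // handle the error
+//   }
+//   data := buf.Bytes()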
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
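+
+// Illustrative arithmetic for grow: with len(p.buf) == 10 and cap(p.buf) == 16,
+// grow(20) needs 30 bytes. Doubling the length gives 20, which is still short
+// of the need, so the new capacity becomes 30 and the existing 10 bytes are
+// copied into the freshly allocated slice.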
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
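+
+// Illustrative usage sketch: callers normally go through the package-level
+// Merge helper, which reaches this method via the generated XXX_Merge
+// wrapper. pb.Example stands in for a hypothetical generated message type.
+//
+//   dst := &pb.Example{}
+//   src := &pb.Example{Name: "abc"}
+//   proto.Merge(dst, src) // dst.Name is now "abc"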
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// merge merges the field's value from src into dst, where dst and src
+	// point to the field within the destination and source messages.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("message field %s without pointer", tf))
+			case isSlice: // E.g., []*pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mi.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mi.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
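+
+// Illustrative usage sketch: applications normally call the package-level
+// proto.Unmarshal, which dispatches to the generated XXX_Unmarshal wrapper
+// and ends up in this method. pb.Example stands in for a hypothetical
+// generated message type.
+//
+//   out := &pb.Example{}
+//   if err := proto.Unmarshal(data, out); err != nil {
+//       // handle the error
+//   }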
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+				panic("bad type for XXX_extensions field: " + f.Type.Name())
+			}
+			u.oldExtensions = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
+			}
+		}
+
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
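+
+// Illustrative note on the packed branch above: a packed repeated field is a
+// single WireBytes record holding back-to-back varints. A hypothetical
+// repeated int64 field number 5 with values 1, 2, 3 appears on the wire as
+//
+//   0x2a 0x03 0x01 0x02 0x03
+//
+// The dispatcher consumes the 0x2a tag byte, so this function sees
+// 0x03 0x01 0x02 0x03, slices off the 3-byte payload, and appends each
+// decoded value to the slice.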
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
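+
+// Illustrative note on the zigzag decode above: int64(x>>1) ^ int64(x)<<63>>63
+// undoes the sint64 zigzag mapping, so x = 0, 1, 2, 3, 4 decode to
+// v = 0, -1, 1, -2, 2. The right shift drops the sign bit stored in bit 0,
+// and the second term is all ones exactly when that bit was set, which flips
+// the result into the negative range.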
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
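
The unrolled decodeVarint above and the inline zigzag expression used by the sint64/sint32 unmarshalers (int64(x>>1) ^ int64(x)<<63>>63) can be hard to follow in isolation. The standalone sketch below (illustrative only, not part of the vendored file) round-trips both encodings; encodeZigzag64 and decodeZigzag64 are hypothetical helper names, and its loop-based decodeVarint is a compact equivalent of the unrolled decoder above, not the library's code.

package main

import "fmt"

// encodeVarint appends x to b in base-128 varint form, mirroring the
// vendored encodeVarint above.
func encodeVarint(b []byte, x uint64) []byte {
	for x >= 1<<7 {
		b = append(b, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(b, byte(x))
}

// decodeVarint is a compact, loop-based equivalent of the unrolled decoder
// above; it returns (0, 0) on truncated or over-long input.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// encodeZigzag64 maps signed integers onto unsigned ones so that values of
// small magnitude (positive or negative) encode to short varints.
func encodeZigzag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// decodeZigzag64 inverts encodeZigzag64; it is the same expression the
// sint64 unmarshalers above apply inline.
func decodeZigzag64(x uint64) int64 { return int64(x>>1) ^ int64(x)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -300, 1 << 40} {
		b := encodeVarint(nil, encodeZigzag64(v))
		x, n := decodeVarint(b)
		fmt.Printf("v=%d wire=%x decoded=%d bytes=%d\n", v, b, decodeZigzag64(x), n)
	}
}
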
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
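
A minimal usage sketch of the marshalers above (illustrative only, not part of the vendored file). It assumes a generated message type examplepb.Example with optional Name and Id fields; the package path, type, and fields are hypothetical, but MarshalText, CompactTextString, and TextMarshaler are the real entry points defined in this file.

package main

import (
	"fmt"
	"os"

	"github.com/golang/protobuf/proto"
	examplepb "example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	// Hypothetical generated message; proto.String/proto.Int32 build the
	// pointer fields used by proto2-style structs.
	msg := &examplepb.Example{
		Name: proto.String("olt-1"),
		Id:   proto.Int32(7),
	}

	// Multi-line text format, written directly to an io.Writer.
	if err := proto.MarshalText(os.Stdout, msg); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}

	// One-line compact form as a string.
	fmt.Println(proto.CompactTextString(msg))

	// The configurable marshaler can also expand google.protobuf.Any
	// messages whose concrete types are registered.
	tm := proto.TextMarshaler{Compact: false, ExpandAny: true}
	fmt.Print(tm.Text(msg))
}
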
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
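+
+// A minimal sketch of the escape rules implemented above: an octal escape
+// is the leading digit plus exactly two more octal digits, \x and \X take
+// exactly two hex digits, and \u and \U take four and eight hex digits
+// respectively. For example, unquoteC(`\101\x41\u00e9`, '"') yields "AAé",
+// while unquoteC(`\0`, '"') fails because \0 lacks two following digits.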
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
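+
+// A brief sketch of the concatenation rule above: as in C, adjacent quoted
+// strings separated only by whitespace form a single string token, so
+//
+//	s: "foo" 'bar'
+//
+// yields one token whose unquoted value is "foobar".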
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
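+
+// A short sketch of the colon rule above, using a hypothetical message-typed
+// field named inner and a scalar field named count: both
+//
+//	inner: { x: 1 }
+//	inner { x: 1 }
+//
+// are accepted, because the colon is optional before a message or group,
+// whereas the scalar must be written with a colon, e.g. count: 3.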
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
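+
+// A minimal sketch of the forms handled above, with hypothetical field and
+// message names: an ordinary field is "name: value", an extension or
+// expanded Any appears in square brackets, and a map entry is a nested
+// key/value message:
+//
+//	count: 3
+//	[type.googleapis.com/mypkg.Inner] { x: 1 }
+//	labels < key: "env" value: "prod" >
+//
+// Consecutive fields may optionally be separated by ';' or ','.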
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
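+
+// A small sketch of the value forms accepted above: booleans may be written
+// as true/false, t/f, 1/0 or True/False; floats may carry a trailing 'f'
+// (e.g. 1.5f); enum fields accept either the value name or its number; and
+// a repeated field (here a hypothetical repeated int32 named ids) may use
+// either list notation or repetition:
+//
+//	ids: [1, 2, 3]
+//	ids: 1 ids: 2 ids: 3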
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
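+
+// A minimal usage sketch for callers of this package, assuming a
+// hypothetical generated message type mypkg.Example with fields name and id:
+//
+//	msg := &mypkg.Example{}
+//	if err := proto.UnmarshalText(`name: "abc" id: 7`, msg); err != nil {
+//		// handle the *ParseError or *RequiredNotSetError
+//	}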
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..1ded05b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2887 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 0}
+}
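+
+// A small sketch of how these generated enum helpers are typically used:
+//
+//	t := FieldDescriptorProto_TYPE_STRING.Enum() // *FieldDescriptorProto_Type
+//	_ = t.String()                               // "TYPE_STRING"
+//	_ = FieldDescriptorProto_Type_value["TYPE_INT32"] // 5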
+
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// etc.
+	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? HTTP based RPC implementation may choose GET verb for safe
+// methods, and PUT verb for idempotent methods instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
+	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+	0: "IDEMPOTENCY_UNKNOWN",
+	1: "NO_SIDE_EFFECTS",
+	2: "IDEMPOTENT",
+}
+
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+	"IDEMPOTENCY_UNKNOWN": 0,
+	"NO_SIDE_EFFECTS":     1,
+	"IDEMPOTENT":          2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+	p := new(MethodOptions_IdempotencyLevel)
+	*p = x
+	return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+	if err != nil {
+		return err
+	}
+	*x = MethodOptions_IdempotencyLevel(value)
+	return nil
+}
+
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()    {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{0}
+}
+
+func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
+}
+func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
+}
+func (m *FileDescriptorSet) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorSet.Size(m)
+}
+func (m *FileDescriptorSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
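+
+// A brief sketch of the nil-safe getter pattern used throughout this file:
+// every getter checks its receiver (and any scalar pointer field) against
+// nil, so calling a getter on a nil message returns the zero value rather
+// than panicking:
+//
+//	var fds *FileDescriptorSet
+//	_ = fds.GetFile() // nil, no panic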
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()    {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{1}
+}
+
+func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
+}
+func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
+}
+func (m *FileDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorProto.Size(m)
+}
+func (m *FileDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()    {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2}
+}
+
+func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
+}
+func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
+}
+func (m *DescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto.Size(m)
+}
+func (m *DescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage()    {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
+}
+func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
+}
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
+}
+func (m *ExtensionRangeOptions) XXX_Size() int {
+	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
+}
+func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()    {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4}
+}
+
+func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
+}
+func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
+}
+func (m *FieldDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FieldDescriptorProto.Size(m)
+}
+func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()    {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{5}
+}
+
+func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
+}
+func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
+}
+func (m *OneofDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_OneofDescriptorProto.Size(m)
+}
+func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Range of reserved numeric values. Reserved numeric values may not be used
+	// by enum values in the same enum declaration. Reserved ranges may not
+	// overlap.
+	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved enum value names, which may not be reused. A given name may only
+	// be reserved once.
+	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()    {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6}
+}
+
+func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
+}
+func (m *EnumDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto.Size(m)
+}
+func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6, 0}
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()    {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{7}
+}
+
+func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
+}
+func (m *EnumValueDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
+}
+func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()    {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{8}
+}
+
+func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
+}
+func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
+}
+func (m *ServiceDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
+}
+func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies if client streams multiple client messages
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies if server streams multiple server messages
+	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()    {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{9}
+}
+
+func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
+}
+func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
+}
+func (m *MethodDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_MethodDescriptorProto.Size(m)
+}
+func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
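+
+// exampleMethodIsBidiStreaming is an illustrative sketch, not part of the
+// generated descriptor API: the generated getters are nil-safe and fall back
+// to the declared proto2 defaults (false for both streaming flags), so even a
+// nil *MethodDescriptorProto simply reports a plain unary method.
+func exampleMethodIsBidiStreaming(m *MethodDescriptorProto) bool {
+	// Both directions must stream for the method to be bidirectional.
+	return m.GetClientStreaming() && m.GetServerStreaming()
+}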
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the Objective-C class prefix which is prepended to all Objective-C
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default Swift generators will take the proto package and CamelCase it
+	// replacing '.' with underscore and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be used
+	// for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// The parser stores options it doesn't recognize here.
+	// See the documentation for the "Options" section above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FileOptions) Reset()         { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()    {}
+func (*FileOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10}
+}
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
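+
+// exampleIsCustomFileOption is an illustrative sketch, not part of the
+// generated descriptor API: it checks whether a field number addresses a
+// custom FileOptions extension, i.e. whether it falls inside the declared
+// extension range [1000, 536870911] (536870911 = 2^29-1, the largest
+// allowed field number).
+func exampleIsCustomFileOption(fieldNumber int32) bool {
+	for _, r := range extRange_FileOptions {
+		if fieldNumber >= r.Start && fieldNumber <= r.End {
+			return true
+		}
+	}
+	return false
+}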
+
+func (m *FileOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
+}
+func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
+}
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
+}
+func (m *FileOptions) XXX_Size() int {
+	return xxx_messageInfo_FileOptions.Size(m)
+}
+func (m *FileOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileOptions proto.InternalMessageInfo
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+	if m != nil && m.PhpClassPrefix != nil {
+		return *m.PhpClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()    {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+	return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
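+
+// exampleIsSyntheticMapEntry is an illustrative sketch, not part of the
+// generated descriptor API: tools that walk message descriptors usually skip
+// the MapFieldEntry messages the compiler synthesizes for map fields, which
+// are marked by the map_entry option. The nil-safe getters make the check a
+// one-liner (GetOptions is the accessor generated for DescriptorProto earlier
+// in this file).
+func exampleIsSyntheticMapEntry(d *DescriptorProto) bool {
+	return d.GetOptions().GetMapEntry()
+}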
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript
+	// number.
+	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+	// use the JavaScript "number" type.  The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()    {}
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12}
+}
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
+}
+func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
+}
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
+}
+func (m *FieldOptions) XXX_Size() int {
+	return xxx_messageInfo_FieldOptions.Size(m)
+}
+func (m *FieldOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()    {}
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{13}
+}
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
+}
+func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
+}
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
+}
+func (m *OneofOptions) XXX_Size() int {
+	return xxx_messageInfo_OneofOptions.Size(m)
+}
+func (m *OneofOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()    {}
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{14}
+}
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
+}
+func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
+}
+func (m *EnumOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumOptions.Size(m)
+}
+func (m *EnumOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()    {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{15}
+}
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
+}
+func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
+}
+func (m *EnumValueOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumValueOptions.Size(m)
+}
+func (m *EnumValueOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()    {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{16}
+}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
+}
+func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
+}
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
+}
+func (m *ServiceOptions) XXX_Size() int {
+	return xxx_messageInfo_ServiceOptions.Size(m)
+}
+func (m *ServiceOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()    {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()    {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18}
+}
+
+func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
+}
+func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
+}
+func (m *UninterpretedOption) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption.Size(m)
+}
+func (m *UninterpretedOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
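+
+// exampleUninterpretedOptionKind is an illustrative sketch, not part of the
+// generated descriptor API: as noted above, exactly one value field of an
+// UninterpretedOption is set, so readers can dispatch on whichever field is
+// non-nil (StringValue, being a byte slice, is checked for nil directly).
+func exampleUninterpretedOptionKind(o *UninterpretedOption) string {
+	switch {
+	case o == nil:
+		return "unset"
+	case o.IdentifierValue != nil:
+		return "identifier_value"
+	case o.PositiveIntValue != nil:
+		return "positive_int_value"
+	case o.NegativeIntValue != nil:
+		return "negative_int_value"
+	case o.DoubleValue != nil:
+		return "double_value"
+	case o.StringValue != nil:
+		return "string_value"
+	case o.AggregateValue != nil:
+		return "aggregate_value"
+	default:
+		return "unset"
+	}
+}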
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
+}
+func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
+}
+func (m *UninterpretedOption_NamePart) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
+}
+func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
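+
+// exampleJoinNameParts is an illustrative sketch, not part of the generated
+// descriptor API: it reassembles the dotted option name described above, so
+// the parts { ["foo", false], ["bar.baz", true], ["qux", false] } come back
+// as "foo.(bar.baz).qux".
+func exampleJoinNameParts(parts []*UninterpretedOption_NamePart) string {
+	name := ""
+	for i, p := range parts {
+		if i > 0 {
+			name += "."
+		}
+		if p.GetIsExtension() {
+			// Extension segments are wrapped in parentheses.
+			name += "(" + p.GetNamePart() + ")"
+		} else {
+			name += p.GetNamePart()
+		}
+	}
+	return name
+}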
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements are
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()    {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19}
+}
+
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()    {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19, 0}
+}
+
+func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
+}
+func (m *SourceCodeInfo_Location) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
+}
+func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
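+
+// exampleSpanStart is an illustrative sketch, not part of the generated
+// descriptor API: the span documented above stores zero-based positions, so
+// converting its start to the one-based line/column users expect just adds 1
+// to each coordinate.
+func exampleSpanStart(loc *SourceCodeInfo_Location) (line, column int32) {
+	span := loc.GetSpan()
+	if len(span) < 3 {
+		// A well-formed span has three or four elements.
+		return 0, 0
+	}
+	return span[0] + 1, span[1] + 1
+}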
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()    {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20}
+}
+
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified offset. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
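Each GeneratedCodeInfo_Annotation ties a half-open byte range [begin, end) in a generated file back to the .proto element named by its path, as the field comments above describe. The sketch below shows one way a consumer might slice generated text with those offsets; it is not part of the vendored file, and the annotation contents are invented for illustration.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// annotatedText returns the slice of generated source covered by an
// annotation, using the half-open [begin, end) byte range convention.
func annotatedText(generated []byte, a *descriptor.GeneratedCodeInfo_Annotation) string {
	begin, end := int(a.GetBegin()), int(a.GetEnd())
	if begin < 0 || end > len(generated) || begin > end {
		return ""
	}
	return string(generated[begin:end])
}

func main() {
	generated := []byte("type Foo struct{ Bar string }")
	ann := &descriptor.GeneratedCodeInfo_Annotation{
		Path:       []int32{4, 0},             // illustrative: first message_type in the file
		SourceFile: proto.String("foo.proto"), // illustrative source file name
		Begin:      proto.Int32(5),
		End:        proto.Int32(8),
	}
	fmt.Printf("%s %v -> %q\n", ann.GetSourceFile(), ann.GetPath(), annotatedText(generated, ann))
}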
+func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+}
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+
+var fileDescriptor_e5baabe45344a177 = []byte{
+	// 2589 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+	0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
+	0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
+	0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
+	0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
+	0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
+	0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
+	0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
+	0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
+	0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
+	0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
+	0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
+	0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
+	0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
+	0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
+	0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
+	0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
+	0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
+	0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
+	0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
+	0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
+	0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
+	0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
+	0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
+	0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
+	0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
+	0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
+	0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
+	0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
+	0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
+	0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
+	0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
+	0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
+	0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
+	0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
+	0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
+	0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
+	0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
+	0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
+	0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
+	0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
+	0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
+	0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
+	0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
+	0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
+	0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
+	0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
+	0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
+	0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
+	0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
+	0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
+	0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
+	0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
+	0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
+	0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
+	0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
+	0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
+	0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
+	0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
+	0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
+	0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
+	0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
+	0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
+	0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
+	0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
+	0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
+	0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
+	0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
+	0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
+	0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
+	0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
+	0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
+	0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
+	0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
+	0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
+	0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
+	0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
+	0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
+	0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
+	0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
+	0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
+	0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
+	0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
+	0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
+	0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
+	0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
+	0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
+	0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
+	0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
+	0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
+	0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
+	0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
+	0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
+	0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
+	0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
+	0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
+	0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
+	0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
+	0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
+	0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
+	0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
+	0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
+	0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
+	0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
+	0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
+	0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
+	0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
+	0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
+	0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
+	0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
+	0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
+	0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
+	0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
+	0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
+	0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
+	0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
+	0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
+	0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
+	0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
+	0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
+	0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
+	0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
+	0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
+	0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
+	0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
+	0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
+	0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
+	0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
+	0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
+	0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
+	0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
+	0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
+	0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
+	0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
+	0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
+	0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
+	0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
+	0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
+	0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
+	0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
+	0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
+	0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
+	0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
+	0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
+	0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
+	0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
+	0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
+	0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
+	0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
+	0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
+	0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
+	0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
+	0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
+	0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
+	0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
+	0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
+	0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
+	0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
+	0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
+	0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
+	0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
+}
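The byte blob above is the gzipped FileDescriptorProto for descriptor.proto, registered under its file name by the RegisterFile call in init. Assuming the v1 github.com/golang/protobuf API vendored here, those bytes can be fetched back with proto.FileDescriptor and decoded, as in this minimal sketch (not part of the vendored code):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Importing the descriptor package runs its init, which registers the
	// gzipped descriptor under this file name.
	gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
	if gz == nil {
		log.Fatal("descriptor.proto is not registered")
	}
	zr, err := gzip.NewReader(bytes.NewReader(gz))
	if err != nil {
		log.Fatal(err)
	}
	raw, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatal(err)
	}
	fd := new(descriptor.FileDescriptorProto)
	if err := proto.Unmarshal(raw, fd); err != nil {
		log.Fatal(err)
	}
	fmt.Println(fd.GetName(), "package:", fd.GetPackage(), "messages:", len(fd.GetMessageType()))
}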
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..ed08fcb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,883 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
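As the comment above notes, protoc can serialize everything it parses into a FileDescriptorSet (for example with --descriptor_set_out). A minimal sketch of reading such a set back with the vendored Go types follows; it is illustrative only, and the set.pb path is a hypothetical output file:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Assumes a descriptor set was produced beforehand, e.g.:
	//   protoc --include_source_info --descriptor_set_out=set.pb your.proto
	raw, err := ioutil.ReadFile("set.pb") // hypothetical output path
	if err != nil {
		log.Fatal(err)
	}
	set := new(descriptor.FileDescriptorSet)
	if err := proto.Unmarshal(raw, set); err != nil {
		log.Fatal(err)
	}
	for _, fd := range set.GetFile() {
		fmt.Println(fd.GetName(), "package:", fd.GetPackage(), "messages:", len(fd.GetMessageType()))
	}
}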
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+
+    optional ExtensionRangeOptions options = 3;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by the protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+
+  // Range of reserved numeric values. Reserved values may not be used by
+  // entries in the same enum. Reserved ranges may not overlap.
+  //
+  // Note that this is distinct from DescriptorProto.ReservedRange in that it
+  // is inclusive such that it can appropriately represent the entire int32
+  // domain.
+  message EnumReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Inclusive.
+  }
+
+  // Range of reserved numeric values. Reserved numeric values may not be used
+  // by enum values in the same enum declaration. Reserved ranges may not
+  // overlap.
+  repeated EnumReservedRange reserved_range = 4;
+
+  // Reserved enum value names, which may not be reused. A given name may only
+  // be reserved once.
+  repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies whether the client streams multiple client messages
+  optional bool client_streaming = 5 [default=false];
+  // Identifies whether the server streams multiple server messages
+  optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; in the very
+  // least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the objective c class prefix which is prepended to all objective c
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default, Swift generators will take the proto package and CamelCase it,
+  // replacing '.' with underscore, and use that to prefix the types/symbols
+  // defined. When this option is provided, they will use this value instead
+  // to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+
+  // Use this option to change the namespace of php generated metadata classes.
+  // Default is empty. When this option is empty, the proto file name will be used
+  // for determining the namespace.
+  optional string php_metadata_namespace = 44;
+
+  // Use this option to change the package of ruby generated classes. Default
+  // is empty. When this option is not set, the package name will be used for
+  // determining the ruby package.
+  optional string ruby_package = 45;
+
+  // The parser stores options it doesn't recognize here.
+  // See the documentation for the "Options" section above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message.
+  // See the documentation for the "Options" section above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+  // is represented as a JavaScript string, which avoids the loss of precision that
+  // can happen when a large value is converted to a floating-point JavaScript number.
+  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+  // use the JavaScript "number" type.  The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; in the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; in the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? HTTP-based RPC implementations may choose the GET verb for safe
+  // methods, and the PUT verb for idempotent methods, instead of the default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition appears.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not a leading or trailing comment
+    //   // for qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
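A hedged sketch of how the path and span encoding above can be consumed: given a FileDescriptorProto whose source_code_info was populated (for example by running protoc with --include_source_info), the helper below looks up the span recorded for a particular path. It assumes the Go types generated from this file by protoc-gen-go.

// Sketch: looking up the source span for a given path, following the
// path/span encoding described in the comments above.
package srcinfo

import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"

// findSpan returns the span recorded for the Location whose path equals the
// given path, or nil if no such location exists.
func findSpan(fd *descriptor.FileDescriptorProto, path []int32) []int32 {
	for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
		p := loc.GetPath()
		if len(p) != len(path) {
			continue
		}
		match := true
		for i := range p {
			if p[i] != path[i] {
				match = false
				break
			}
		}
		if match {
			// Span is [startLine, startCol, endLine, endCol] or
			// [startLine, startCol, endCol]; all values are zero-based.
			return loc.GetSpan()
		}
	}
	return nil
}

For example, the span of file.message_type(3).field(7).name() from the comment above would be findSpan(fd, []int32{4, 3, 2, 7, 1}).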
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified offset. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
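The begin/end offsets above are plain byte offsets into the generated file, with end one past the last relevant byte. A small sketch, assuming the generated Go descriptor types and that generatedSrc holds the bytes of the annotated file:

// Sketch: slicing generated source using a GeneratedCodeInfo.Annotation.
package genmap

import descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"

// annotatedText returns the portion of the generated file that an Annotation
// refers to. Per the comments above, end is one past the last relevant byte,
// so len(result) == end - begin.
func annotatedText(generatedSrc []byte, a *descriptor.GeneratedCodeInfo_Annotation) string {
	begin, end := int(a.GetBegin()), int(a.GetEnd())
	if begin < 0 || end > len(generatedSrc) || begin > end {
		return "" // offsets do not fit the provided source; treat as absent
	}
	return string(generatedSrc[begin:end])
}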
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
new file mode 100644
index 0000000..61bfc10
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
@@ -0,0 +1,369 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+/*
+Package plugin_go is a generated protocol buffer package.
+
+It is generated from these files:
+	google/protobuf/compiler/plugin.proto
+
+It has these top-level messages:
+	Version
+	CodeGeneratorRequest
+	CodeGeneratorResponse
+*/
+package plugin_go
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of protocol compiler.
+type Version struct {
+	Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+	Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+	Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+	// be empty for mainline stable releases.
+	Suffix               *string  `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Version) Reset()                    { *m = Version{} }
+func (m *Version) String() string            { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage()               {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Version) Unmarshal(b []byte) error {
+	return xxx_messageInfo_Version.Unmarshal(m, b)
+}
+func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+}
+func (dst *Version) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Version.Merge(dst, src)
+}
+func (m *Version) XXX_Size() int {
+	return xxx_messageInfo_Version.Size(m)
+}
+func (m *Version) XXX_DiscardUnknown() {
+	xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func (m *Version) GetMajor() int32 {
+	if m != nil && m.Major != nil {
+		return *m.Major
+	}
+	return 0
+}
+
+func (m *Version) GetMinor() int32 {
+	if m != nil && m.Minor != nil {
+		return *m.Minor
+	}
+	return 0
+}
+
+func (m *Version) GetPatch() int32 {
+	if m != nil && m.Patch != nil {
+		return *m.Patch
+	}
+	return 0
+}
+
+func (m *Version) GetSuffix() string {
+	if m != nil && m.Suffix != nil {
+		return *m.Suffix
+	}
+	return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+	// The .proto files that were explicitly listed on the command-line.  The
+	// code generator should generate code only for these files.  Each file's
+	// descriptor will be included in proto_file, below.
+	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+	// The generator parameter passed on the command-line.
+	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	// FileDescriptorProtos for all files in files_to_generate and everything
+	// they import.  The files will appear in topological order, so each file
+	// appears before any file that imports it.
+	//
+	// protoc guarantees that all proto_files will be written after
+	// the fields above, even though this is not technically guaranteed by the
+	// protobuf wire format.  This theoretically could allow a plugin to stream
+	// in the FileDescriptorProtos and handle them one by one rather than read
+	// the entire set into memory at once.  However, as of this writing, this
+	// is not similarly optimized on protoc's end -- it will store all fields in
+	// memory at once before sending them to the plugin.
+	//
+	// Type names of fields and extensions in the FileDescriptorProto are always
+	// fully qualified.
+	ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+	// The version number of protocol compiler.
+	CompilerVersion      *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
+func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorRequest) ProtoMessage()               {}
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
+}
+func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
+}
+func (m *CodeGeneratorRequest) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorRequest.Size(m)
+}
+func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
+
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
+	if m != nil {
+		return m.FileToGenerate
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetParameter() string {
+	if m != nil && m.Parameter != nil {
+		return *m.Parameter
+	}
+	return ""
+}
+
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
+	if m != nil {
+		return m.ProtoFile
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
+	if m != nil {
+		return m.CompilerVersion
+	}
+	return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+	// Error message.  If non-empty, code generation failed.  The plugin process
+	// should exit with status code zero even if it reports an error in this way.
+	//
+	// This should be used to indicate errors in .proto files which prevent the
+	// code generator from generating correct code.  Errors which indicate a
+	// problem in protoc itself -- such as the input CodeGeneratorRequest being
+	// unparseable -- should be reported by writing a message to stderr and
+	// exiting with a non-zero status code.
+	Error                *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File                 []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
+func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse) ProtoMessage()               {}
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse.Size(m)
+}
+func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse) GetError() string {
+	if m != nil && m.Error != nil {
+		return *m.Error
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+	// The file name, relative to the output directory.  The name must not
+	// contain "." or ".." components and must be relative, not be absolute (so,
+	// the file cannot lie outside the output directory).  "/" must be used as
+	// the path separator, not "\".
+	//
+	// If the name is omitted, the content will be appended to the previous
+	// file.  This allows the generator to break large files into small chunks,
+	// and allows the generated text to be streamed back to protoc so that large
+	// files need not reside completely in memory at one time.  Note that as of
+	// this writing protoc does not optimize for this -- it will read the entire
+	// CodeGeneratorResponse before writing files to disk.
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// If non-empty, indicates that the named file should already exist, and the
+	// content here is to be inserted into that file at a defined insertion
+	// point.  This feature allows a code generator to extend the output
+	// produced by another code generator.  The original generator may provide
+	// insertion points by placing special annotations in the file that look
+	// like:
+	//   @@protoc_insertion_point(NAME)
+	// The annotation can have arbitrary text before and after it on the line,
+	// which allows it to be placed in a comment.  NAME should be replaced with
+	// an identifier naming the point -- this is what other generators will use
+	// as the insertion_point.  Code inserted at this point will be placed
+	// immediately above the line containing the insertion point (thus multiple
+	// insertions to the same point will come out in the order they were added).
+	// The double-@ is intended to make it unlikely that the generated code
+	// could contain things that look like insertion points by accident.
+	//
+	// For example, the C++ code generator places the following line in the
+	// .pb.h files that it generates:
+	//   // @@protoc_insertion_point(namespace_scope)
+	// This line appears within the scope of the file's package namespace, but
+	// outside of any particular class.  Another plugin can then specify the
+	// insertion_point "namespace_scope" to generate additional classes or
+	// other declarations that should be placed in this scope.
+	//
+	// Note that if the line containing the insertion point begins with
+	// whitespace, the same whitespace will be added to every line of the
+	// inserted text.  This is useful for languages like Python, where
+	// indentation matters.  In these languages, the insertion point comment
+	// should be indented the same amount as any inserted code will need to be
+	// in order to work correctly in that context.
+	//
+	// The code generator that generates the initial file and the one which
+	// inserts into it must both run as part of a single invocation of protoc.
+	// Code generators are executed in the order in which they appear on the
+	// command line.
+	//
+	// If |insertion_point| is present, |name| must also be present.
+	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+	// The file contents.
+	Content              *string  `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorResponse_File) Reset()                    { *m = CodeGeneratorResponse_File{} }
+func (m *CodeGeneratorResponse_File) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse_File) ProtoMessage()               {}
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse_File) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
+}
+func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse_File) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if m != nil && m.InsertionPoint != nil {
+		return *m.InsertionPoint
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetContent() string {
+	if m != nil && m.Content != nil {
+		return *m.Content
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
+	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
+	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
+	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
+}
+
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
+	0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
+	0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
+	0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
+	0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
+	0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
+	0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
+	0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
+	0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
+	0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
+	0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
+	0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
+	0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
+	0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
+	0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
+	0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
+	0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
+	0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
+	0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
+	0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
+	0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
+	0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
+	0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
+	0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
+	0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
+	0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
+	0x00,
+}
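Putting the generated types above together, a protoc plugin is simply a program that reads a CodeGeneratorRequest from stdin and writes a CodeGeneratorResponse to stdout. The sketch below is illustrative only: the output file suffix (.echo.txt) and its content are made up, and error handling is reduced to log.Fatalf.

// Sketch: a minimal protoc plugin built on the generated types above.
package main

import (
	"io/ioutil"
	"log"
	"os"

	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

func main() {
	// The encoded CodeGeneratorRequest arrives on stdin.
	in, err := ioutil.ReadAll(os.Stdin)
	if err != nil {
		log.Fatalf("reading CodeGeneratorRequest: %v", err)
	}
	req := &plugin.CodeGeneratorRequest{}
	if err := proto.Unmarshal(in, req); err != nil {
		log.Fatalf("parsing CodeGeneratorRequest: %v", err)
	}

	// Emit one illustrative file per explicitly requested .proto.
	resp := &plugin.CodeGeneratorResponse{}
	for _, name := range req.GetFileToGenerate() {
		resp.File = append(resp.File, &plugin.CodeGeneratorResponse_File{
			Name:    proto.String(name + ".echo.txt"),
			Content: proto.String("generated from " + name + "\n"),
		})
	}

	// The encoded CodeGeneratorResponse goes back on stdout.
	out, err := proto.Marshal(resp)
	if err != nil {
		log.Fatalf("marshaling CodeGeneratorResponse: %v", err)
	}
	if _, err := os.Stdout.Write(out); err != nil {
		log.Fatalf("writing CodeGeneratorResponse: %v", err)
	}
}

Installed on the PATH as protoc-gen-echo, a plugin like this would be selected by passing --echo_out=<dir> to protoc.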
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 0000000..8953d0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+	XXX_unrecognized []byte                                 `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage()       {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+	if this != nil && this.Parameter != nil {
+		return *this.Parameter
+	}
+	return ""
+}
+
+type CodeGeneratorResponse struct {
+	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+	if this != nil && this.Error != nil {
+		return *this.Error
+	}
+	return ""
+}
+
+type CodeGeneratorResponse_File struct {
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+	if this != nil && this.Name != nil {
+		return *this.Name
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if this != nil && this.InsertionPoint != nil {
+		return *this.InsertionPoint
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+	if this != nil && this.Content != nil {
+		return *this.Content
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to
+//   change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins.  A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path.  The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+  optional int32 major = 1;
+  optional int32 minor = 2;
+  optional int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+  // The .proto files that were explicitly listed on the command-line.  The
+  // code generator should generate code only for these files.  Each file's
+  // descriptor will be included in proto_file, below.
+  repeated string file_to_generate = 1;
+
+  // The generator parameter passed on the command-line.
+  optional string parameter = 2;
+
+  // FileDescriptorProtos for all files in files_to_generate and everything
+  // they import.  The files will appear in topological order, so each file
+  // appears before any file that imports it.
+  //
+  // protoc guarantees that all proto_files will be written after
+  // the fields above, even though this is not technically guaranteed by the
+  // protobuf wire format.  This theoretically could allow a plugin to stream
+  // in the FileDescriptorProtos and handle them one by one rather than read
+  // the entire set into memory at once.  However, as of this writing, this
+  // is not similarly optimized on protoc's end -- it will store all fields in
+  // memory at once before sending them to the plugin.
+  //
+  // Type names of fields and extensions in the FileDescriptorProto are always
+  // fully qualified.
+  repeated FileDescriptorProto proto_file = 15;
+
+  // The version number of protocol compiler.
+  optional Version compiler_version = 3;
+
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+  // Error message.  If non-empty, code generation failed.  The plugin process
+  // should exit with status code zero even if it reports an error in this way.
+  //
+  // This should be used to indicate errors in .proto files which prevent the
+  // code generator from generating correct code.  Errors which indicate a
+  // problem in protoc itself -- such as the input CodeGeneratorRequest being
+  // unparseable -- should be reported by writing a message to stderr and
+  // exiting with a non-zero status code.
+  optional string error = 1;
+
+  // Represents a single generated file.
+  message File {
+    // The file name, relative to the output directory.  The name must not
+    // contain "." or ".." components and must be relative, not be absolute (so,
+    // the file cannot lie outside the output directory).  "/" must be used as
+    // the path separator, not "\".
+    //
+    // If the name is omitted, the content will be appended to the previous
+    // file.  This allows the generator to break large files into small chunks,
+    // and allows the generated text to be streamed back to protoc so that large
+    // files need not reside completely in memory at one time.  Note that as of
+    // this writing protoc does not optimize for this -- it will read the entire
+    // CodeGeneratorResponse before writing files to disk.
+    optional string name = 1;
+
+    // If non-empty, indicates that the named file should already exist, and the
+    // content here is to be inserted into that file at a defined insertion
+    // point.  This feature allows a code generator to extend the output
+    // produced by another code generator.  The original generator may provide
+    // insertion points by placing special annotations in the file that look
+    // like:
+    //   @@protoc_insertion_point(NAME)
+    // The annotation can have arbitrary text before and after it on the line,
+    // which allows it to be placed in a comment.  NAME should be replaced with
+    // an identifier naming the point -- this is what other generators will use
+    // as the insertion_point.  Code inserted at this point will be placed
+    // immediately above the line containing the insertion point (thus multiple
+    // insertions to the same point will come out in the order they were added).
+    // The double-@ is intended to make it unlikely that the generated code
+    // could contain things that look like insertion points by accident.
+    //
+    // For example, the C++ code generator places the following line in the
+    // .pb.h files that it generates:
+    //   // @@protoc_insertion_point(namespace_scope)
+    // This line appears within the scope of the file's package namespace, but
+    // outside of any particular class.  Another plugin can then specify the
+    // insertion_point "namespace_scope" to generate additional classes or
+    // other declarations that should be placed in this scope.
+    //
+    // Note that if the line containing the insertion point begins with
+    // whitespace, the same whitespace will be added to every line of the
+    // inserted text.  This is useful for languages like Python, where
+    // indentation matters.  In these languages, the insertion point comment
+    // should be indented the same amount as any inserted code will need to be
+    // in order to work correctly in that context.
+    //
+    // The code generator that generates the initial file and the one which
+    // inserts into it must both run as part of a single invocation of protoc.
+    // Code generators are executed in the order in which they appear on the
+    // command line.
+    //
+    // If |insertion_point| is present, |name| must also be present.
+    optional string insertion_point = 2;
+
+    // The file contents.
+    optional string content = 15;
+  }
+  repeated File file = 15;
+}
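As a companion to the insertion_point description above, the sketch below (using the Go types generated from this file, found in plugin.pb.go earlier in this import) builds a response file that splices content into a previously generated file at the "namespace_scope" point from the C++ example; the target file name and the inserted content are illustrative only.

// Sketch: a CodeGeneratorResponse.File that targets an insertion point
// exposed by another generator running in the same protoc invocation.
package echoinsert

import (
	"github.com/golang/protobuf/proto"
	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
)

// insertAtNamespaceScope asks protoc to splice extra content into a file
// emitted earlier, immediately above its @@protoc_insertion_point(namespace_scope) line.
func insertAtNamespaceScope() *plugin.CodeGeneratorResponse_File {
	return &plugin.CodeGeneratorResponse_File{
		Name:           proto.String("example.pb.h"),
		InsertionPoint: proto.String("namespace_scope"),
		Content:        proto.String("class ExtraHelper {};\n"),
	}
}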
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding message
+// type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type
+// of the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
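A short usage sketch for the helpers above, using the Duration well-known type vendored alongside this package as the payload; the seconds value is arbitrary.

// Sketch: packing and unpacking an Any with MarshalAny, Is, UnmarshalAny,
// and DynamicAny.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	d := &durpb.Duration{Seconds: 90}

	a, err := ptypes.MarshalAny(d)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Typed unpack: the caller already knows the concrete type.
	if ptypes.Is(a, &durpb.Duration{}) {
		var out durpb.Duration
		if err := ptypes.UnmarshalAny(a, &out); err != nil {
			log.Fatal(err)
		}
		fmt.Println(out.Seconds) // 90
	}

	// Dynamic unpack: the concrete type is chosen from the type URL.
	var dyn ptypes.DynamicAny
	if err := ptypes.UnmarshalAny(a, &dyn); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%T\n", dyn.Message) // *duration.Duration
}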
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..78ee523
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package any
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Any) Reset()         { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()    {}
+func (*Any) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+	return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+	xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
+
+func (m *Any) GetTypeUrl() string {
+	if m != nil {
+		return m.TypeUrl
+	}
+	return ""
+}
+
+func (m *Any) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..4932942
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,154 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name that uniquely identifies the type of the serialized
+  // protocol buffer message. The last segment of the URL's path must represent
+  // the fully qualified name of the type (as in
+  // `path/google.protobuf.Duration`). The name should be in a canonical form
+  // (e.g., leading "." is not accepted).
+  //
+  // In practice, teams usually precompile into the binary all types that they
+  // expect it to use in the context of Any. However, for URLs which use the
+  // scheme `http`, `https`, or no scheme, one can optionally set up a type
+  // server that maps type URLs to message definitions as follows:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Note: this functionality is not currently available in the official
+  // protobuf release, and it is not used for type URLs beginning with
+  // type.googleapis.com.
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
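The JSON shape described above can be observed with the companion jsonpb package from the same github.com/golang/protobuf module (assumed here to be available alongside this vendored code). Because Duration has a custom JSON form, the Any is rendered with the "@type" plus "value" layout:

// Sketch: the JSON representation of an Any wrapping a well-known type.
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/jsonpb"
	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	a, err := ptypes.MarshalAny(&durpb.Duration{Seconds: 1, Nanos: 212000000})
	if err != nil {
		log.Fatal(err)
	}
	s, err := (&jsonpb.Marshaler{}).MarshalToString(a)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(s)
	// Expected shape (per the comment above), e.g.:
	// {"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}
}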
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..26d1ca2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos) * time.Nanosecond
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
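For reference, a minimal usage sketch of the two conversion helpers defined above; the package main wrapper, the sample duration, and the printed results are illustrative assumptions, not part of the vendored file.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Duration -> durpb.Duration via DurationProto.
	p := ptypes.DurationProto(90*time.Second + 500*time.Millisecond)
	fmt.Println(p.Seconds, p.Nanos) // 90 500000000

	// durpb.Duration -> time.Duration via Duration, which applies the
	// range and sign checks from validateDuration.
	d, err := ptypes.Duration(p)
	if err != nil {
		panic(err)
	}
	fmt.Println(d) // 1m30.5s
}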
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..0d681ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package duration
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+}
+
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *Duration) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+	// 190 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
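The pseudo-code in Example 1 above translates directly to the generated Go types; a sketch under that assumption follows (the helper name durationBetween and the sample values are illustrative):

package main

import (
	"fmt"

	durpb "github.com/golang/protobuf/ptypes/duration"
	tspb "github.com/golang/protobuf/ptypes/timestamp"
)

// durationBetween subtracts two Timestamps and normalizes the result so
// that seconds and nanos share the same sign, as in Example 1.
func durationBetween(start, end *tspb.Timestamp) *durpb.Duration {
	d := &durpb.Duration{
		Seconds: end.Seconds - start.Seconds,
		Nanos:   end.Nanos - start.Nanos,
	}
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1000000000
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1000000000
	}
	return d
}

func main() {
	start := &tspb.Timestamp{Seconds: 10, Nanos: 900000000}
	end := &tspb.Timestamp{Seconds: 12, Nanos: 100000000}
	fmt.Println(durationBetween(start, end)) // seconds:1 nanos:200000000
}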
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..b4eb03e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package empty
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_900544acb223d5b8, []int{0}
+}
+
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+
+var fileDescriptor_900544acb223d5b8 = []byte{
+	// 148 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+	0xb7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+message Empty {}
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..33daa73
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+	0: "NULL_VALUE",
+}
+
+var NullValue_value = map[string]int32{
+	"NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+	return proto.EnumName(NullValue_name, int32(x))
+}
+
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+type Struct struct {
+	// Unordered map of dynamically typed values.
+	Fields               map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Struct) Reset()         { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage()    {}
+func (*Struct) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+	return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+	xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+type Value struct {
+	// The kind of value.
+	//
+	// Types that are valid to be assigned to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind                 isValue_Kind `protobuf_oneof:"kind"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *Value) Reset()         { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()    {}
+func (*Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{1}
+}
+
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+	return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+	if x, ok := m.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+	if x, ok := m.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (m *Value) GetStringValue() string {
+	if x, ok := m.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+	if x, ok := m.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+	if x, ok := m.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+	if x, ok := m.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+type ListValue struct {
+	// Repeated field of dynamically typed values.
+	Values               []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListValue) Reset()         { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage()    {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{2}
+}
+
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+	return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+	proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+	0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+	0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+	0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+	0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+	0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+	0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+	0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+	0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+	0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+	0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+	0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+	0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+	0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+	0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+	0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+	0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+	0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+	0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+	0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+	0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+	0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+	0x00,
+}
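The oneof wrapper types generated above (Value_StringValue, Value_NumberValue, and so on) are how a Value carries its kind. A short sketch of building a Struct and inspecting its values with an ordinary Go type switch; the field names and values are illustrative assumptions:

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name":    {Kind: &structpb.Value_StringValue{StringValue: "voltctl"}},
			"retries": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
			"debug":   {Kind: &structpb.Value_BoolValue{BoolValue: false}},
		},
	}

	for k, v := range s.GetFields() {
		// GetKind returns the isValue_Kind interface; the concrete type
		// identifies which oneof member is set.
		switch kind := v.GetKind().(type) {
		case *structpb.Value_StringValue:
			fmt.Println(k, "=", kind.StringValue)
		case *structpb.Value_NumberValue:
			fmt.Println(k, "=", kind.NumberValue)
		case *structpb.Value_BoolValue:
			fmt.Println(k, "=", kind.BoolValue)
		default:
			fmt.Printf("%s has kind %T\n", k, v.GetKind())
		}
	}
}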
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8da0df0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
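For reference, a minimal round trip through the Timestamp helpers above; the chosen date and the printed strings are illustrative assumptions, not part of the vendored file.

package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// time.Time -> Timestamp; the error reflects the range check in
	// validateTimestamp (years 0001 through 9999).
	ts, err := ptypes.TimestampProto(time.Date(2019, 6, 1, 12, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}

	// Timestamp -> time.Time; the result is always in UTC on success.
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t)                          // 2019-06-01 12:00:00 +0000 UTC
	fmt.Println(ptypes.TimestampString(ts)) // 2019-06-01T12:00:00Z (RFC 3339)
}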
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..31cd846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from  RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Timestamp) Reset()         { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()    {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..eafb3fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from  RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..add19a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	// The double value.
+	Value                float64  `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DoubleValue) Reset()         { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage()    {}
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{0}
+}
+
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
+}
+func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
+}
+func (m *DoubleValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleValue.Merge(m, src)
+}
+func (m *DoubleValue) XXX_Size() int {
+	return xxx_messageInfo_DoubleValue.Size(m)
+}
+func (m *DoubleValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_DoubleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
+
+func (m *DoubleValue) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+type FloatValue struct {
+	// The float value.
+	Value                float32  `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FloatValue) Reset()         { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage()    {}
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{1}
+}
+
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FloatValue.Unmarshal(m, b)
+}
+func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
+}
+func (m *FloatValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FloatValue.Merge(m, src)
+}
+func (m *FloatValue) XXX_Size() int {
+	return xxx_messageInfo_FloatValue.Size(m)
+}
+func (m *FloatValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_FloatValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FloatValue proto.InternalMessageInfo
+
+func (m *FloatValue) GetValue() float32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+type Int64Value struct {
+	// The int64 value.
+	Value                int64    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int64Value) Reset()         { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage()    {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{2}
+}
+
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int64Value.Unmarshal(m, b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+}
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int64Value.Merge(m, src)
+}
+func (m *Int64Value) XXX_Size() int {
+	return xxx_messageInfo_Int64Value.Size(m)
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value                uint64   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt64Value) Reset()         { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage()    {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{3}
+}
+
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+}
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt64Value.Merge(m, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+	return xxx_messageInfo_UInt64Value.Size(m)
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value                int32    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int32Value) Reset()         { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage()    {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{4}
+}
+
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int32Value.Unmarshal(m, b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+}
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int32Value.Merge(m, src)
+}
+func (m *Int32Value) XXX_Size() int {
+	return xxx_messageInfo_Int32Value.Size(m)
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value                uint32   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt32Value) Reset()         { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage()    {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{5}
+}
+
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+}
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt32Value.Merge(m, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+	return xxx_messageInfo_UInt32Value.Size(m)
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	// The bool value.
+	Value                bool     `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BoolValue) Reset()         { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage()    {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{6}
+}
+
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BoolValue.Unmarshal(m, b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+}
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoolValue.Merge(m, src)
+}
+func (m *BoolValue) XXX_Size() int {
+	return xxx_messageInfo_BoolValue.Size(m)
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+	if m != nil {
+		return m.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	// The string value.
+	Value                string   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StringValue) Reset()         { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage()    {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{7}
+}
+
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StringValue.Unmarshal(m, b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+}
+func (m *StringValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StringValue.Merge(m, src)
+}
+func (m *StringValue) XXX_Size() int {
+	return xxx_messageInfo_StringValue.Size(m)
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value                []byte   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BytesValue) Reset()         { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage()    {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{8}
+}
+
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BytesValue.Unmarshal(m, b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+}
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesValue.Merge(m, src)
+}
+func (m *BytesValue) XXX_Size() int {
+	return xxx_messageInfo_BytesValue.Size(m)
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+	// 259 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+	0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+	0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+	0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+	0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+	0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+	0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+	0x01, 0x00, 0x00,
+}
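+
+// exampleStringValue is an illustrative sketch (not generated code) showing
+// why these wrapper types exist: a *StringValue pointer can distinguish an
+// unset field from the empty string, which a plain string field cannot.
+func exampleStringValue() {
+	var unset *StringValue                // nil pointer: the field is absent
+	set := &StringValue{Value: "voltctl"} // explicitly present, even if the value were ""
+	_ = unset.GetValue()                  // safe on a nil receiver; returns the zero value ""
+	_ = set.GetValue()                    // "voltctl"
+}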
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
new file mode 100644
index 0000000..f8684d9
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -0,0 +1,13 @@
+language: go
+
+go:
+  - 1.4
+  - 1.3
+  - 1.2
+  - tip
+
+install:
+  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+
+script:
+  - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
new file mode 100644
index 0000000..51cf5cd
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -0,0 +1,67 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project.  There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement.  This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an [individual
+    CLA][].
+
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+
+## Submitting a patch ##
+
+  1. It's generally best to start by opening a new issue describing the bug or
+     feature you're intending to fix.  Even if you think it's relatively minor,
+     it's helpful to know what people are working on.  Mention in the initial
+     issue that you are planning to work on that bug or feature so that it can
+     be assigned to you.
+
+  1. Follow the normal process of [forking][] the project, and set up a new
+     branch to work in.  It's important that each group of changes be done in
+     separate branches in order to ensure that a pull request only includes the
+     commits related to that bug or feature.
+
+  1. Go makes it very simple to ensure properly formatted code, so always run
+     `go fmt` on your code before committing it.  You should also run
+     [golint][] over your code.  As noted in the [golint readme][], it's not
+     strictly necessary that your code be completely "lint-free", but this will
+     help you find common style issues.
+
+  1. Any significant changes should almost always be accompanied by tests.  The
+     project already has good test coverage, so look at some of the existing
+     tests if you're unsure how to go about it.  [gocov][] and [gocov-html][]
+     are invaluable tools for seeing which parts of your code aren't being
+     exercised by your tests.
+
+  1. Do your best to have [well-formed commit messages][] for each change.
+     This provides consistency throughout the project, and ensures that commit
+     messages are able to be formatted properly by various git tools.
+
+  1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[golint]: https://github.com/golang/lint
+[golint readme]: https://github.com/golang/lint/blob/master/README
+[gocov]: https://github.com/axw/gocov
+[gocov-html]: https://github.com/matm/gocov-html
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/google/gofuzz/LICENSE b/vendor/github.com/google/gofuzz/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
new file mode 100644
index 0000000..64869af
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -0,0 +1,71 @@
+gofuzz
+======
+
+gofuzz is a library for populating Go objects with random values.
+
+[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.png)](https://godoc.org/github.com/google/gofuzz)
+[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz)
+
+This is useful for testing:
+
+* Do your project's objects really serialize/unserialize correctly in all cases?
+* Is there an incorrectly formatted object that will cause your project to panic?
+
+Import with ```import "github.com/google/gofuzz"```
+
+You can use it on single variables:
+```go
+f := fuzz.New()
+var myInt int
+f.Fuzz(&myInt) // myInt gets a random value.
+```
+
+You can use it on maps:
+```go
+f := fuzz.New().NilChance(0).NumElements(1, 1)
+var myMap map[ComplexKeyType]string
+f.Fuzz(&myMap) // myMap will have exactly one element.
+```
+
+Customize the chance of getting a nil pointer:
+```go
+f := fuzz.New().NilChance(.5)
+var fancyStruct struct {
+  A, B, C, D *string
+}
+f.Fuzz(&fancyStruct) // About half the pointers should be set.
+```
+
+You can even customize the randomization completely if needed:
+```go
+type MyEnum string
+const (
+        A MyEnum = "A"
+        B MyEnum = "B"
+)
+type MyInfo struct {
+        Type MyEnum
+        AInfo *string
+        BInfo *string
+}
+
+f := fuzz.New().NilChance(0).Funcs(
+        func(e *MyInfo, c fuzz.Continue) {
+                switch c.Intn(2) {
+                case 0:
+                        e.Type = A
+                        c.Fuzz(&e.AInfo)
+                case 1:
+                        e.Type = B
+                        c.Fuzz(&e.BInfo)
+                }
+        },
+)
+
+var myObject MyInfo
+f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
+```
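+
+Fuzzing can also be made reproducible by fixing the seed (a small sketch; both
+`NewWithSeed` and `RandSource` are part of this package):
+```go
+f := fuzz.NewWithSeed(42) // or: fuzz.New().RandSource(rand.NewSource(42))
+var values []string
+f.Fuzz(&values) // the same seed always yields the same values
+```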
+
+See more examples in ```example_test.go```.
+
+Happy testing!
diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go
new file mode 100644
index 0000000..9f9956d
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package fuzz is a library for populating Go objects with random values.
+package fuzz
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
new file mode 100644
index 0000000..1dfa80a
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -0,0 +1,487 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fuzz
+
+import (
+	"fmt"
+	"math/rand"
+	"reflect"
+	"time"
+)
+
+// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
+type fuzzFuncMap map[reflect.Type]reflect.Value
+
+// Fuzzer knows how to fill any object with random fields.
+type Fuzzer struct {
+	fuzzFuncs        fuzzFuncMap
+	defaultFuzzFuncs fuzzFuncMap
+	r                *rand.Rand
+	nilChance        float64
+	minElements      int
+	maxElements      int
+	maxDepth         int
+}
+
+// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
+// RandSource, NilChance, or NumElements in any order.
+func New() *Fuzzer {
+	return NewWithSeed(time.Now().UnixNano())
+}
+
+func NewWithSeed(seed int64) *Fuzzer {
+	f := &Fuzzer{
+		defaultFuzzFuncs: fuzzFuncMap{
+			reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
+		},
+
+		fuzzFuncs:   fuzzFuncMap{},
+		r:           rand.New(rand.NewSource(seed)),
+		nilChance:   .2,
+		minElements: 1,
+		maxElements: 10,
+		maxDepth:    100,
+	}
+	return f
+}
+
+// Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
+//
+// Each entry in fuzzFuncs must be a function taking two parameters.
+// The first parameter must be a pointer or map. It is the variable that
+// function will fill with random data. The second parameter must be a
+// fuzz.Continue, which will provide a source of randomness and a way
+// to automatically continue fuzzing smaller pieces of the first parameter.
+//
+// These functions are called sensibly, e.g., if you wanted custom string
+// fuzzing, the function `func(s *string, c fuzz.Continue)` would get
+// called and passed the address of strings. Maps and pointers will always
+// be made/new'd for you, ignoring the NilChange option. For slices, it
+// doesn't make much sense to  pre-create them--Fuzzer doesn't know how
+// long you want your slice--so take a pointer to a slice, and make it
+// yourself. (If you don't want your map/pointer type pre-made, take a
+// pointer to it, and make it yourself.) See the examples for a range of
+// custom functions.
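+//
+// A minimal sketch of registering a custom string fuzzer (illustrative only;
+// it uses Funcs, Continue.RandString, and Fuzz from this package):
+//
+//	f := fuzz.New().Funcs(
+//		func(s *string, c fuzz.Continue) {
+//			*s = "id-" + c.RandString() // always produce a prefixed string
+//		},
+//	)
+//	var s string
+//	f.Fuzz(&s) // s now starts with "id-"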
+func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer {
+	for i := range fuzzFuncs {
+		v := reflect.ValueOf(fuzzFuncs[i])
+		if v.Kind() != reflect.Func {
+			panic("Need only funcs!")
+		}
+		t := v.Type()
+		if t.NumIn() != 2 || t.NumOut() != 0 {
+			panic("Need 2 in and 0 out params!")
+		}
+		argT := t.In(0)
+		switch argT.Kind() {
+		case reflect.Ptr, reflect.Map:
+		default:
+			panic("fuzzFunc must take pointer or map type")
+		}
+		if t.In(1) != reflect.TypeOf(Continue{}) {
+			panic("fuzzFunc's second parameter must be type fuzz.Continue")
+		}
+		f.fuzzFuncs[argT] = v
+	}
+	return f
+}
+
+// RandSource causes f to get values from the given source of randomness.
+// Use if you want deterministic fuzzing.
+func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer {
+	f.r = rand.New(s)
+	return f
+}
+
+// NilChance sets the probability of creating a nil pointer, map, or slice to
+// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive.
+func (f *Fuzzer) NilChance(p float64) *Fuzzer {
+	if p < 0 || p > 1 {
+		panic("p should be between 0 and 1, inclusive.")
+	}
+	f.nilChance = p
+	return f
+}
+
+// NumElements sets the minimum and maximum number of elements that will be
+// added to a non-nil map or slice.
+func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer {
+	if atLeast > atMost {
+		panic("atLeast must be <= atMost")
+	}
+	if atLeast < 0 {
+		panic("atLeast must be >= 0")
+	}
+	f.minElements = atLeast
+	f.maxElements = atMost
+	return f
+}
+
+func (f *Fuzzer) genElementCount() int {
+	if f.minElements == f.maxElements {
+		return f.minElements
+	}
+	return f.minElements + f.r.Intn(f.maxElements-f.minElements+1)
+}
+
+func (f *Fuzzer) genShouldFill() bool {
+	return f.r.Float64() > f.nilChance
+}
+
+// MaxDepth sets the maximum number of recursive fuzz calls that will be made
+// before stopping.  This includes struct members, pointers, and map and slice
+// elements.
+func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
+	f.maxDepth = d
+	return f
+}
+
+// Fuzz recursively fills all of obj's fields with something random.  First
+// this tries to find a custom fuzz function (see Funcs).  If there is no
+// custom function this tests whether the object implements fuzz.Interface and,
+// if so, calls Fuzz on it to fuzz itself.  If that fails, this will see if
+// there is a default fuzz function provided by this package.  If all of that
+// fails, this will generate random values for all primitive fields and then
+// recurse for all non-primitives.
+//
+// This is safe for cyclic or tree-like structs, up to a limit.  Use the
+// MaxDepth method to adjust how deep you need it to recurse.
+//
+// obj must be a pointer. Only exported (public) fields can be set (thanks,
+// golang :/ ). Intended for tests, so will panic on bad input or unimplemented
+// fields.
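+//
+// A minimal sketch of fuzzing a recursive type with a depth cap (illustrative;
+// Node is a hypothetical caller-defined type):
+//
+//	type Node struct {
+//		Value int
+//		Next  *Node
+//	}
+//
+//	f := fuzz.New().NilChance(0.5).MaxDepth(5)
+//	var n Node
+//	f.Fuzz(&n) // recursion into Next stops after at most 5 levels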
+func (f *Fuzzer) Fuzz(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	f.fuzzWithContext(v, 0)
+}
+
+// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance.  This applies only to obj and not other instances of obj's
+// type.
+// Not safe for cyclic or tree-like structs!
+// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ).
+// Intended for tests, so will panic on bad input or unimplemented fields.
+func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	f.fuzzWithContext(v, flagNoCustomFuzz)
+}
+
+const (
+	// Do not try to find a custom fuzz function.  Does not apply recursively.
+	flagNoCustomFuzz uint64 = 1 << iota
+)
+
+func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
+	fc := &fuzzerContext{fuzzer: f}
+	fc.doFuzz(v, flags)
+}
+
+// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
+// be thread-safe.
+type fuzzerContext struct {
+	fuzzer   *Fuzzer
+	curDepth int
+}
+
+func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
+	if fc.curDepth >= fc.fuzzer.maxDepth {
+		return
+	}
+	fc.curDepth++
+	defer func() { fc.curDepth-- }()
+
+	if !v.CanSet() {
+		return
+	}
+
+	if flags&flagNoCustomFuzz == 0 {
+		// Check for both pointer and non-pointer custom functions.
+		if v.CanAddr() && fc.tryCustom(v.Addr()) {
+			return
+		}
+		if fc.tryCustom(v) {
+			return
+		}
+	}
+
+	if fn, ok := fillFuncMap[v.Kind()]; ok {
+		fn(v, fc.fuzzer.r)
+		return
+	}
+	switch v.Kind() {
+	case reflect.Map:
+		if fc.fuzzer.genShouldFill() {
+			v.Set(reflect.MakeMap(v.Type()))
+			n := fc.fuzzer.genElementCount()
+			for i := 0; i < n; i++ {
+				key := reflect.New(v.Type().Key()).Elem()
+				fc.doFuzz(key, 0)
+				val := reflect.New(v.Type().Elem()).Elem()
+				fc.doFuzz(val, 0)
+				v.SetMapIndex(key, val)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Ptr:
+		if fc.fuzzer.genShouldFill() {
+			v.Set(reflect.New(v.Type().Elem()))
+			fc.doFuzz(v.Elem(), 0)
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Slice:
+		if fc.fuzzer.genShouldFill() {
+			n := fc.fuzzer.genElementCount()
+			v.Set(reflect.MakeSlice(v.Type(), n, n))
+			for i := 0; i < n; i++ {
+				fc.doFuzz(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Array:
+		if fc.fuzzer.genShouldFill() {
+			n := v.Len()
+			for i := 0; i < n; i++ {
+				fc.doFuzz(v.Index(i), 0)
+			}
+			return
+		}
+		v.Set(reflect.Zero(v.Type()))
+	case reflect.Struct:
+		for i := 0; i < v.NumField(); i++ {
+			fc.doFuzz(v.Field(i), 0)
+		}
+	case reflect.Chan:
+		fallthrough
+	case reflect.Func:
+		fallthrough
+	case reflect.Interface:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("Can't handle %#v", v.Interface()))
+	}
+}
+
+// tryCustom searches for custom handlers, and returns true iff it finds a match
+// and successfully randomizes v.
+func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
+	// First: see if we have a fuzz function for it.
+	doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
+	if !ok {
+		// Second: see if it can fuzz itself.
+		if v.CanInterface() {
+			intf := v.Interface()
+			if fuzzable, ok := intf.(Interface); ok {
+				fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
+				return true
+			}
+		}
+		// Finally: see if there is a default fuzz function.
+		doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
+		if !ok {
+			return false
+		}
+	}
+
+	switch v.Kind() {
+	case reflect.Ptr:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+	case reflect.Map:
+		if v.IsNil() {
+			if !v.CanSet() {
+				return false
+			}
+			v.Set(reflect.MakeMap(v.Type()))
+		}
+	default:
+		return false
+	}
+
+	doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
+		fc:   fc,
+		Rand: fc.fuzzer.r,
+	})})
+	return true
+}
+
+// Interface represents an object that knows how to fuzz itself.  Any time we
+// find a type that implements this interface, we delegate the act of
+// fuzzing to it.
+type Interface interface {
+	Fuzz(c Continue)
+}
+
+// Continue can be passed to custom fuzzing functions to allow them to use
+// the correct source of randomness and to continue fuzzing their members.
+type Continue struct {
+	fc *fuzzerContext
+
+	// For convenience, Continue implements rand.Rand via embedding.
+	// Use this for generating any randomness if you want your fuzzing
+	// to be repeatable for a given seed.
+	*rand.Rand
+}
+
+// Fuzz continues fuzzing obj. obj must be a pointer.
+func (c Continue) Fuzz(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	c.fc.doFuzz(v, 0)
+}
+
+// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
+// obj's type will not be called and obj will not be tested for fuzz.Interface
+// conformance.  This applies only to obj and not other instances of obj's
+// type.
+func (c Continue) FuzzNoCustom(obj interface{}) {
+	v := reflect.ValueOf(obj)
+	if v.Kind() != reflect.Ptr {
+		panic("needed ptr!")
+	}
+	v = v.Elem()
+	c.fc.doFuzz(v, flagNoCustomFuzz)
+}
+
+// RandString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func (c Continue) RandString() string {
+	return randString(c.Rand)
+}
+
+// RandUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func (c Continue) RandUint64() uint64 {
+	return randUint64(c.Rand)
+}
+
+// RandBool returns true or false randomly.
+func (c Continue) RandBool() bool {
+	return randBool(c.Rand)
+}
+
+func fuzzInt(v reflect.Value, r *rand.Rand) {
+	v.SetInt(int64(randUint64(r)))
+}
+
+func fuzzUint(v reflect.Value, r *rand.Rand) {
+	v.SetUint(randUint64(r))
+}
+
+func fuzzTime(t *time.Time, c Continue) {
+	var sec, nsec int64
+	// Allow for about 1000 years of random time values, which keeps things
+	// like JSON parsing reasonably happy.
+	sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60)
+	c.Fuzz(&nsec)
+	*t = time.Unix(sec, nsec)
+}
+
+var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
+	reflect.Bool: func(v reflect.Value, r *rand.Rand) {
+		v.SetBool(randBool(r))
+	},
+	reflect.Int:     fuzzInt,
+	reflect.Int8:    fuzzInt,
+	reflect.Int16:   fuzzInt,
+	reflect.Int32:   fuzzInt,
+	reflect.Int64:   fuzzInt,
+	reflect.Uint:    fuzzUint,
+	reflect.Uint8:   fuzzUint,
+	reflect.Uint16:  fuzzUint,
+	reflect.Uint32:  fuzzUint,
+	reflect.Uint64:  fuzzUint,
+	reflect.Uintptr: fuzzUint,
+	reflect.Float32: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(float64(r.Float32()))
+	},
+	reflect.Float64: func(v reflect.Value, r *rand.Rand) {
+		v.SetFloat(r.Float64())
+	},
+	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+	reflect.String: func(v reflect.Value, r *rand.Rand) {
+		v.SetString(randString(r))
+	},
+	reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) {
+		panic("unimplemented")
+	},
+}
+
+// randBool returns true or false randomly.
+func randBool(r *rand.Rand) bool {
+	if r.Int()&1 == 1 {
+		return true
+	}
+	return false
+}
+
+type charRange struct {
+	first, last rune
+}
+
+// choose returns a random unicode character from the given range, using the
+// given randomness source.
+func (r *charRange) choose(rand *rand.Rand) rune {
+	count := int64(r.last - r.first)
+	return r.first + rune(rand.Int63n(count))
+}
+
+var unicodeRanges = []charRange{
+	{' ', '~'},           // ASCII characters
+	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
+	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
+}
+
+// randString makes a random string up to 20 characters long. The returned string
+// may include a variety of (valid) UTF-8 encodings.
+func randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	runes := make([]rune, n)
+	for i := range runes {
+		runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
+	}
+	return string(runes)
+}
+
+// randUint64 makes random 64 bit numbers.
+// Weirdly, rand doesn't have a function that gives you 64 random bits.
+func randUint64(r *rand.Rand) uint64 {
+	return uint64(r.Uint32())<<32 | uint64(r.Uint32())
+}
diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod
new file mode 100644
index 0000000..8ec4fe9
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/go.mod
@@ -0,0 +1,3 @@
+module github.com/google/gofuzz
+
+go 1.12
diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE
new file mode 100644
index 0000000..6b0b127
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/LICENSE
@@ -0,0 +1,203 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
new file mode 100644
index 0000000..4fd44c4
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go
@@ -0,0 +1,8847 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+package openapi_v2
+
+import (
+	"fmt"
+	"github.com/googleapis/gnostic/compiler"
+	"gopkg.in/yaml.v2"
+	"regexp"
+	"strings"
+)
+
+// Version returns the package name (and OpenAPI version).
+func Version() string {
+	return "openapi_v2"
+}
+
+// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not.
+func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) {
+	errors := make([]error, 0)
+	x := &AdditionalPropertiesItem{}
+	matched := false
+	// Schema schema = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+			if matchingError == nil {
+				x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// bool boolean = 2;
+	boolValue, ok := in.(bool)
+	if ok {
+		x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
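+
+// Editor's note (illustrative sketch, not part of the generated file): Swagger
+// 2.0 allows additionalProperties to be either a nested schema map or a plain
+// boolean, which is why the constructor above tries NewSchema first and then
+// falls back to a bool type assertion. A hypothetical call for the boolean
+// form, assuming the imports shown at the top of this file:
+//
+//     item, err := NewAdditionalPropertiesItem(false, compiler.NewContext("additionalProperties", nil))
+//     if err == nil {
+//         _, isBool := item.Oneof.(*AdditionalPropertiesItem_Boolean) // true for the boolean form
+//         _ = isBool
+//     }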
+
+// NewAny creates an object of type Any if possible, returning an error if not.
+func NewAny(in interface{}, context *compiler.Context) (*Any, error) {
+	errors := make([]error, 0)
+	x := &Any{}
+	bytes, _ := yaml.Marshal(in)
+	x.Yaml = string(bytes)
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not.
+func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) {
+	errors := make([]error, 0)
+	x := &ApiKeySecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "in", "name", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [apiKey]
+			if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 2;
+		v2 := compiler.MapValueForKey(m, "name")
+		if v2 != nil {
+			x.Name, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 3;
+		v3 := compiler.MapValueForKey(m, "in")
+		if v3 != nil {
+			x.In, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [header query]
+			if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 4;
+		v4 := compiler.MapValueForKey(m, "description")
+		if v4 != nil {
+			x.Description, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not.
+func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) {
+	errors := make([]error, 0)
+	x := &BasicAuthenticationSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [basic]
+			if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not.
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) {
+	errors := make([]error, 0)
+	x := &BodyParameter{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "schema"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "in", "name", "required", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 2;
+		v2 := compiler.MapValueForKey(m, "name")
+		if v2 != nil {
+			x.Name, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 3;
+		v3 := compiler.MapValueForKey(m, "in")
+		if v3 != nil {
+			x.In, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [body]
+			if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool required = 4;
+		v4 := compiler.MapValueForKey(m, "required")
+		if v4 != nil {
+			x.Required, ok = v4.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Schema schema = 5;
+		v5 := compiler.MapValueForKey(m, "schema")
+		if v5 != nil {
+			var err error
+			x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewContact creates an object of type Contact if possible, returning an error if not.
+func NewContact(in interface{}, context *compiler.Context) (*Contact, error) {
+	errors := make([]error, 0)
+	x := &Contact{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"email", "name", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string email = 3;
+		v3 := compiler.MapValueForKey(m, "email")
+		if v3 != nil {
+			x.Email, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewDefault creates an object of type Default if possible, returning an error if not.
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) {
+	errors := make([]error, 0)
+	x := &Default{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewDefinitions creates an object of type Definitions if possible, returning an error if not.
+func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) {
+	errors := make([]error, 0)
+	x := &Definitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSchema additional_properties = 1;
+		// MAP: Schema
+		x.AdditionalProperties = make([]*NamedSchema, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSchema{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewDocument creates an object of type Document if possible, returning an error if not.
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) {
+	errors := make([]error, 0)
+	x := &Document{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"info", "paths", "swagger"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string swagger = 1;
+		v1 := compiler.MapValueForKey(m, "swagger")
+		if v1 != nil {
+			x.Swagger, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [2.0]
+			if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) {
+				message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Info info = 2;
+		v2 := compiler.MapValueForKey(m, "info")
+		if v2 != nil {
+			var err error
+			x.Info, err = NewInfo(v2, compiler.NewContext("info", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string host = 3;
+		v3 := compiler.MapValueForKey(m, "host")
+		if v3 != nil {
+			x.Host, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string base_path = 4;
+		v4 := compiler.MapValueForKey(m, "basePath")
+		if v4 != nil {
+			x.BasePath, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string schemes = 5;
+		v5 := compiler.MapValueForKey(m, "schemes")
+		if v5 != nil {
+			v, ok := v5.([]interface{})
+			if ok {
+				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [http https ws wss]
+			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v", v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string consumes = 6;
+		v6 := compiler.MapValueForKey(m, "consumes")
+		if v6 != nil {
+			v, ok := v6.([]interface{})
+			if ok {
+				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string produces = 7;
+		v7 := compiler.MapValueForKey(m, "produces")
+		if v7 != nil {
+			v, ok := v7.([]interface{})
+			if ok {
+				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Paths paths = 8;
+		v8 := compiler.MapValueForKey(m, "paths")
+		if v8 != nil {
+			var err error
+			x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Definitions definitions = 9;
+		v9 := compiler.MapValueForKey(m, "definitions")
+		if v9 != nil {
+			var err error
+			x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ParameterDefinitions parameters = 10;
+		v10 := compiler.MapValueForKey(m, "parameters")
+		if v10 != nil {
+			var err error
+			x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ResponseDefinitions responses = 11;
+		v11 := compiler.MapValueForKey(m, "responses")
+		if v11 != nil {
+			var err error
+			x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated SecurityRequirement security = 12;
+		v12 := compiler.MapValueForKey(m, "security")
+		if v12 != nil {
+			// repeated SecurityRequirement
+			x.Security = make([]*SecurityRequirement, 0)
+			a, ok := v12.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Security = append(x.Security, y)
+				}
+			}
+		}
+		// SecurityDefinitions security_definitions = 13;
+		v13 := compiler.MapValueForKey(m, "securityDefinitions")
+		if v13 != nil {
+			var err error
+			x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated Tag tags = 14;
+		v14 := compiler.MapValueForKey(m, "tags")
+		if v14 != nil {
+			// repeated Tag
+			x.Tags = make([]*Tag, 0)
+			a, ok := v14.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewTag(item, compiler.NewContext("tags", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Tags = append(x.Tags, y)
+				}
+			}
+		}
+		// ExternalDocs external_docs = 15;
+		v15 := compiler.MapValueForKey(m, "externalDocs")
+		if v15 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 16;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewExamples creates an object of type Examples if possible, returning an error if not.
+func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) {
+	errors := make([]error, 0)
+	x := &Examples{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not.
+func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) {
+	errors := make([]error, 0)
+	x := &ExternalDocs{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"url"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFileSchema creates an object of type FileSchema if possible, returning an error if not.
+func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) {
+	errors := make([]error, 0)
+	x := &FileSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string format = 1;
+		v1 := compiler.MapValueForKey(m, "format")
+		if v1 != nil {
+			x.Format, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string title = 2;
+		v2 := compiler.MapValueForKey(m, "title")
+		if v2 != nil {
+			x.Title, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 4;
+		v4 := compiler.MapValueForKey(m, "default")
+		if v4 != nil {
+			var err error
+			x.Default, err = NewAny(v4, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated string required = 5;
+		v5 := compiler.MapValueForKey(m, "required")
+		if v5 != nil {
+			v, ok := v5.([]interface{})
+			if ok {
+				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [file]
+			if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool read_only = 7;
+		v7 := compiler.MapValueForKey(m, "readOnly")
+		if v7 != nil {
+			x.ReadOnly, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 8;
+		v8 := compiler.MapValueForKey(m, "externalDocs")
+		if v8 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Any example = 9;
+		v9 := compiler.MapValueForKey(m, "example")
+		if v9 != nil {
+			var err error
+			x.Example, err = NewAny(v9, compiler.NewContext("example", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not.
+func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &FormDataParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [formData]
+			if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool allow_empty_value = 5;
+		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+		if v5 != nil {
+			x.AllowEmptyValue, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array file]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 7;
+		v7 := compiler.MapValueForKey(m, "format")
+		if v7 != nil {
+			x.Format, ok = v7.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 8;
+		v8 := compiler.MapValueForKey(m, "items")
+		if v8 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 9;
+		v9 := compiler.MapValueForKey(m, "collectionFormat")
+		if v9 != nil {
+			x.CollectionFormat, ok = v9.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes multi]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 10;
+		v10 := compiler.MapValueForKey(m, "default")
+		if v10 != nil {
+			var err error
+			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 11;
+		v11 := compiler.MapValueForKey(m, "maximum")
+		if v11 != nil {
+			switch v11 := v11.(type) {
+			case float64:
+				x.Maximum = v11
+			case float32:
+				x.Maximum = float64(v11)
+			case uint64:
+				x.Maximum = float64(v11)
+			case uint32:
+				x.Maximum = float64(v11)
+			case int64:
+				x.Maximum = float64(v11)
+			case int32:
+				x.Maximum = float64(v11)
+			case int:
+				x.Maximum = float64(v11)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 12;
+		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v12 != nil {
+			x.ExclusiveMaximum, ok = v12.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 13;
+		v13 := compiler.MapValueForKey(m, "minimum")
+		if v13 != nil {
+			switch v13 := v13.(type) {
+			case float64:
+				x.Minimum = v13
+			case float32:
+				x.Minimum = float64(v13)
+			case uint64:
+				x.Minimum = float64(v13)
+			case uint32:
+				x.Minimum = float64(v13)
+			case int64:
+				x.Minimum = float64(v13)
+			case int32:
+				x.Minimum = float64(v13)
+			case int:
+				x.Minimum = float64(v13)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 14;
+		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v14 != nil {
+			x.ExclusiveMinimum, ok = v14.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 15;
+		v15 := compiler.MapValueForKey(m, "maxLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 16;
+		v16 := compiler.MapValueForKey(m, "minLength")
+		if v16 != nil {
+			t, ok := v16.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 17;
+		v17 := compiler.MapValueForKey(m, "pattern")
+		if v17 != nil {
+			x.Pattern, ok = v17.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 18;
+		v18 := compiler.MapValueForKey(m, "maxItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 19;
+		v19 := compiler.MapValueForKey(m, "minItems")
+		if v19 != nil {
+			t, ok := v19.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 20;
+		v20 := compiler.MapValueForKey(m, "uniqueItems")
+		if v20 != nil {
+			x.UniqueItems, ok = v20.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 21;
+		v21 := compiler.MapValueForKey(m, "enum")
+		if v21 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v21.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 22;
+		v22 := compiler.MapValueForKey(m, "multipleOf")
+		if v22 != nil {
+			switch v22 := v22.(type) {
+			case float64:
+				x.MultipleOf = v22
+			case float32:
+				x.MultipleOf = float64(v22)
+			case uint64:
+				x.MultipleOf = float64(v22)
+			case uint32:
+				x.MultipleOf = float64(v22)
+			case int64:
+				x.MultipleOf = float64(v22)
+			case int32:
+				x.MultipleOf = float64(v22)
+			case int:
+				x.MultipleOf = float64(v22)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 23;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeader creates an object of type Header if possible, returning an error if not.
+func NewHeader(in interface{}, context *compiler.Context) (*Header, error) {
+	errors := make([]error, 0)
+	x := &Header{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number integer boolean array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 3;
+		v3 := compiler.MapValueForKey(m, "items")
+		if v3 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 4;
+		v4 := compiler.MapValueForKey(m, "collectionFormat")
+		if v4 != nil {
+			x.CollectionFormat, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 6;
+		v6 := compiler.MapValueForKey(m, "maximum")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.Maximum = v6
+			case float32:
+				x.Maximum = float64(v6)
+			case uint64:
+				x.Maximum = float64(v6)
+			case uint32:
+				x.Maximum = float64(v6)
+			case int64:
+				x.Maximum = float64(v6)
+			case int32:
+				x.Maximum = float64(v6)
+			case int:
+				x.Maximum = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 7;
+		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v7 != nil {
+			x.ExclusiveMaximum, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 8;
+		v8 := compiler.MapValueForKey(m, "minimum")
+		if v8 != nil {
+			switch v8 := v8.(type) {
+			case float64:
+				x.Minimum = v8
+			case float32:
+				x.Minimum = float64(v8)
+			case uint64:
+				x.Minimum = float64(v8)
+			case uint32:
+				x.Minimum = float64(v8)
+			case int64:
+				x.Minimum = float64(v8)
+			case int32:
+				x.Minimum = float64(v8)
+			case int:
+				x.Minimum = float64(v8)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 9;
+		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v9 != nil {
+			x.ExclusiveMinimum, ok = v9.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 10;
+		v10 := compiler.MapValueForKey(m, "maxLength")
+		if v10 != nil {
+			t, ok := v10.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 11;
+		v11 := compiler.MapValueForKey(m, "minLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 12;
+		v12 := compiler.MapValueForKey(m, "pattern")
+		if v12 != nil {
+			x.Pattern, ok = v12.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 13;
+		v13 := compiler.MapValueForKey(m, "maxItems")
+		if v13 != nil {
+			t, ok := v13.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 14;
+		v14 := compiler.MapValueForKey(m, "minItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 15;
+		v15 := compiler.MapValueForKey(m, "uniqueItems")
+		if v15 != nil {
+			x.UniqueItems, ok = v15.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 16;
+		v16 := compiler.MapValueForKey(m, "enum")
+		if v16 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v16.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 17;
+		v17 := compiler.MapValueForKey(m, "multipleOf")
+		if v17 != nil {
+			switch v17 := v17.(type) {
+			case float64:
+				x.MultipleOf = v17
+			case float32:
+				x.MultipleOf = float64(v17)
+			case uint64:
+				x.MultipleOf = float64(v17)
+			case uint32:
+				x.MultipleOf = float64(v17)
+			case int64:
+				x.MultipleOf = float64(v17)
+			case int32:
+				x.MultipleOf = float64(v17)
+			case int:
+				x.MultipleOf = float64(v17)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 18;
+		v18 := compiler.MapValueForKey(m, "description")
+		if v18 != nil {
+			x.Description, ok = v18.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 19;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not.
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &HeaderParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [header]
+			if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 5;
+		v5 := compiler.MapValueForKey(m, "type")
+		if v5 != nil {
+			x.Type, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 6;
+		v6 := compiler.MapValueForKey(m, "format")
+		if v6 != nil {
+			x.Format, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 7;
+		v7 := compiler.MapValueForKey(m, "items")
+		if v7 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 8;
+		v8 := compiler.MapValueForKey(m, "collectionFormat")
+		if v8 != nil {
+			x.CollectionFormat, ok = v8.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 9;
+		v9 := compiler.MapValueForKey(m, "default")
+		if v9 != nil {
+			var err error
+			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 10;
+		v10 := compiler.MapValueForKey(m, "maximum")
+		if v10 != nil {
+			switch v10 := v10.(type) {
+			case float64:
+				x.Maximum = v10
+			case float32:
+				x.Maximum = float64(v10)
+			case uint64:
+				x.Maximum = float64(v10)
+			case uint32:
+				x.Maximum = float64(v10)
+			case int64:
+				x.Maximum = float64(v10)
+			case int32:
+				x.Maximum = float64(v10)
+			case int:
+				x.Maximum = float64(v10)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 11;
+		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v11 != nil {
+			x.ExclusiveMaximum, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 12;
+		v12 := compiler.MapValueForKey(m, "minimum")
+		if v12 != nil {
+			switch v12 := v12.(type) {
+			case float64:
+				x.Minimum = v12
+			case float32:
+				x.Minimum = float64(v12)
+			case uint64:
+				x.Minimum = float64(v12)
+			case uint32:
+				x.Minimum = float64(v12)
+			case int64:
+				x.Minimum = float64(v12)
+			case int32:
+				x.Minimum = float64(v12)
+			case int:
+				x.Minimum = float64(v12)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 13;
+		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v13 != nil {
+			x.ExclusiveMinimum, ok = v13.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 14;
+		v14 := compiler.MapValueForKey(m, "maxLength")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 15;
+		v15 := compiler.MapValueForKey(m, "minLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 16;
+		v16 := compiler.MapValueForKey(m, "pattern")
+		if v16 != nil {
+			x.Pattern, ok = v16.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 17;
+		v17 := compiler.MapValueForKey(m, "maxItems")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 18;
+		v18 := compiler.MapValueForKey(m, "minItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 19;
+		v19 := compiler.MapValueForKey(m, "uniqueItems")
+		if v19 != nil {
+			x.UniqueItems, ok = v19.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 21;
+		v21 := compiler.MapValueForKey(m, "multipleOf")
+		if v21 != nil {
+			switch v21 := v21.(type) {
+			case float64:
+				x.MultipleOf = v21
+			case float32:
+				x.MultipleOf = float64(v21)
+			case uint64:
+				x.MultipleOf = float64(v21)
+			case uint32:
+				x.MultipleOf = float64(v21)
+			case int64:
+				x.MultipleOf = float64(v21)
+			case int32:
+				x.MultipleOf = float64(v21)
+			case int:
+				x.MultipleOf = float64(v21)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 22;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewHeaders creates an object of type Headers if possible, returning an error if not.
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) {
+	errors := make([]error, 0)
+	x := &Headers{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedHeader additional_properties = 1;
+		// MAP: Header
+		x.AdditionalProperties = make([]*NamedHeader, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedHeader{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewHeader(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewInfo creates an object of type Info if possible, returning an error if not.
+func NewInfo(in interface{}, context *compiler.Context) (*Info, error) {
+	errors := make([]error, 0)
+	x := &Info{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"title", "version"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string title = 1;
+		v1 := compiler.MapValueForKey(m, "title")
+		if v1 != nil {
+			x.Title, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string version = 2;
+		v2 := compiler.MapValueForKey(m, "version")
+		if v2 != nil {
+			x.Version, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string terms_of_service = 4;
+		v4 := compiler.MapValueForKey(m, "termsOfService")
+		if v4 != nil {
+			x.TermsOfService, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Contact contact = 5;
+		v5 := compiler.MapValueForKey(m, "contact")
+		if v5 != nil {
+			var err error
+			x.Contact, err = NewContact(v5, compiler.NewContext("contact", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// License license = 6;
+		v6 := compiler.MapValueForKey(m, "license")
+		if v6 != nil {
+			var err error
+			x.License, err = NewLicense(v6, compiler.NewContext("license", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 7;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not.
+func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) {
+	errors := make([]error, 0)
+	x := &ItemsItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Schema = make([]*Schema, 0)
+		y, err := NewSchema(m, compiler.NewContext("<array>", context))
+		if err != nil {
+			return nil, err
+		}
+		x.Schema = append(x.Schema, y)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewJsonReference creates an object of type JsonReference if possible, returning an error if not.
+func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) {
+	errors := make([]error, 0)
+	x := &JsonReference{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"$ref"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"$ref", "description"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewLicense creates an object of type License if possible, returning an error if not.
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) {
+	errors := make([]error, 0)
+	x := &License{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"name", "url"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string url = 2;
+		v2 := compiler.MapValueForKey(m, "url")
+		if v2 != nil {
+			x.Url, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 3;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedAny creates an object of type NamedAny if possible, returning an error if not.
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) {
+	errors := make([]error, 0)
+	x := &NamedAny{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewAny(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not.
+func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) {
+	errors := make([]error, 0)
+	x := &NamedHeader{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Header value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewHeader(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not.
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) {
+	errors := make([]error, 0)
+	x := &NamedParameter{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Parameter value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewParameter(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not.
+func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) {
+	errors := make([]error, 0)
+	x := &NamedPathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PathItem value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewPathItem(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not.
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) {
+	errors := make([]error, 0)
+	x := &NamedResponse{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Response value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewResponse(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not.
+func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) {
+	errors := make([]error, 0)
+	x := &NamedResponseValue{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ResponseValue value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not.
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) {
+	errors := make([]error, 0)
+	x := &NamedSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Schema value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewSchema(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not.
+func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &NamedSecurityDefinitionsItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SecurityDefinitionsItem value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedString creates an object of type NamedString if possible, returning an error if not.
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) {
+	errors := make([]error, 0)
+	x := &NamedString{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			x.Value, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not.
+func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) {
+	errors := make([]error, 0)
+	x := &NamedStringArray{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"name", "value"}
+		var allowedPatterns []*regexp.Regexp
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// StringArray value = 2;
+		v2 := compiler.MapValueForKey(m, "value")
+		if v2 != nil {
+			var err error
+			x.Value, err = NewStringArray(v2, compiler.NewContext("value", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not.
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) {
+	errors := make([]error, 0)
+	x := &NonBodyParameter{}
+	matched := false
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"in", "name", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// HeaderParameterSubSchema header_parameter_sub_schema = 1;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// QueryParameterSubSchema query_parameter_sub_schema = 3;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+		// PathParameterSubSchema path_parameter_sub_schema = 4;
+		{
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context))
+			if matchingError == nil {
+				x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not.
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2AccessCodeSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [accessCode]
+			if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string authorization_url = 4;
+		v4 := compiler.MapValueForKey(m, "authorizationUrl")
+		if v4 != nil {
+			x.AuthorizationUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string token_url = 5;
+		v5 := compiler.MapValueForKey(m, "tokenUrl")
+		if v5 != nil {
+			x.TokenUrl, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 6;
+		v6 := compiler.MapValueForKey(m, "description")
+		if v6 != nil {
+			x.Description, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 7;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not.
+func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2ApplicationSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [application]
+			if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string token_url = 4;
+		v4 := compiler.MapValueForKey(m, "tokenUrl")
+		if v4 != nil {
+			x.TokenUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not.
+func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2ImplicitSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"authorizationUrl", "flow", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [implicit]
+			if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string authorization_url = 4;
+		v4 := compiler.MapValueForKey(m, "authorizationUrl")
+		if v4 != nil {
+			x.AuthorizationUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not.
+func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) {
+	errors := make([]error, 0)
+	x := &Oauth2PasswordSecurity{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"flow", "tokenUrl", "type"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [oauth2]
+			if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string flow = 2;
+		v2 := compiler.MapValueForKey(m, "flow")
+		if v2 != nil {
+			x.Flow, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [password]
+			if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) {
+				message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Oauth2Scopes scopes = 3;
+		v3 := compiler.MapValueForKey(m, "scopes")
+		if v3 != nil {
+			var err error
+			x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string token_url = 4;
+		v4 := compiler.MapValueForKey(m, "tokenUrl")
+		if v4 != nil {
+			x.TokenUrl, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 5;
+		v5 := compiler.MapValueForKey(m, "description")
+		if v5 != nil {
+			x.Description, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not.
+func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) {
+	errors := make([]error, 0)
+	x := &Oauth2Scopes{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedString additional_properties = 1;
+		// MAP: string
+		x.AdditionalProperties = make([]*NamedString, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedString{}
+				pair.Name = k
+				pair.Value = v.(string)
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewOperation creates an object of type Operation if possible, returning an error if not.
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) {
+	errors := make([]error, 0)
+	x := &Operation{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"responses"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated string tags = 1;
+		v1 := compiler.MapValueForKey(m, "tags")
+		if v1 != nil {
+			v, ok := v1.([]interface{})
+			if ok {
+				x.Tags = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string summary = 2;
+		v2 := compiler.MapValueForKey(m, "summary")
+		if v2 != nil {
+			x.Summary, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 4;
+		v4 := compiler.MapValueForKey(m, "externalDocs")
+		if v4 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string operation_id = 5;
+		v5 := compiler.MapValueForKey(m, "operationId")
+		if v5 != nil {
+			x.OperationId, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string produces = 6;
+		v6 := compiler.MapValueForKey(m, "produces")
+		if v6 != nil {
+			v, ok := v6.([]interface{})
+			if ok {
+				x.Produces = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string consumes = 7;
+		v7 := compiler.MapValueForKey(m, "consumes")
+		if v7 != nil {
+			v, ok := v7.([]interface{})
+			if ok {
+				x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated ParametersItem parameters = 8;
+		v8 := compiler.MapValueForKey(m, "parameters")
+		if v8 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := v8.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// Responses responses = 9;
+		v9 := compiler.MapValueForKey(m, "responses")
+		if v9 != nil {
+			var err error
+			x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated string schemes = 10;
+		v10 := compiler.MapValueForKey(m, "schemes")
+		if v10 != nil {
+			v, ok := v10.([]interface{})
+			if ok {
+				x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [http https ws wss]
+			if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) {
+				message := fmt.Sprintf("has unexpected value for schemes: %+v", v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool deprecated = 11;
+		v11 := compiler.MapValueForKey(m, "deprecated")
+		if v11 != nil {
+			x.Deprecated, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated SecurityRequirement security = 12;
+		v12 := compiler.MapValueForKey(m, "security")
+		if v12 != nil {
+			// repeated SecurityRequirement
+			x.Security = make([]*SecurityRequirement, 0)
+			a, ok := v12.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSecurityRequirement(item, compiler.NewContext("security", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Security = append(x.Security, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 13;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParameter creates an object of type Parameter if possible, returning an error if not.
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) {
+	errors := make([]error, 0)
+	x := &Parameter{}
+	matched := false
+	// BodyParameter body_parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_BodyParameter{BodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// NonBodyParameter non_body_parameter = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context))
+			if matchingError == nil {
+				x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not.
+func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ParameterDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedParameter additional_properties = 1;
+		// MAP: Parameter
+		x.AdditionalProperties = make([]*NamedParameter, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedParameter{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewParameter(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not.
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) {
+	errors := make([]error, 0)
+	x := &ParametersItem{}
+	matched := false
+	// Parameter parameter = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewParameter(m, compiler.NewContext("parameter", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_Parameter{Parameter: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here, they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ParametersItem_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathItem creates an object of type PathItem if possible, returning an error if not.
+func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) {
+	errors := make([]error, 0)
+	x := &PathItem{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Operation get = 2;
+		v2 := compiler.MapValueForKey(m, "get")
+		if v2 != nil {
+			var err error
+			x.Get, err = NewOperation(v2, compiler.NewContext("get", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation put = 3;
+		v3 := compiler.MapValueForKey(m, "put")
+		if v3 != nil {
+			var err error
+			x.Put, err = NewOperation(v3, compiler.NewContext("put", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation post = 4;
+		v4 := compiler.MapValueForKey(m, "post")
+		if v4 != nil {
+			var err error
+			x.Post, err = NewOperation(v4, compiler.NewContext("post", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation delete = 5;
+		v5 := compiler.MapValueForKey(m, "delete")
+		if v5 != nil {
+			var err error
+			x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation options = 6;
+		v6 := compiler.MapValueForKey(m, "options")
+		if v6 != nil {
+			var err error
+			x.Options, err = NewOperation(v6, compiler.NewContext("options", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation head = 7;
+		v7 := compiler.MapValueForKey(m, "head")
+		if v7 != nil {
+			var err error
+			x.Head, err = NewOperation(v7, compiler.NewContext("head", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Operation patch = 8;
+		v8 := compiler.MapValueForKey(m, "patch")
+		if v8 != nil {
+			var err error
+			x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated ParametersItem parameters = 9;
+		v9 := compiler.MapValueForKey(m, "parameters")
+		if v9 != nil {
+			// repeated ParametersItem
+			x.Parameters = make([]*ParametersItem, 0)
+			a, ok := v9.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewParametersItem(item, compiler.NewContext("parameters", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Parameters = append(x.Parameters, y)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 10;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not.
+func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &PathParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"required"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [path]
+			if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 5;
+		v5 := compiler.MapValueForKey(m, "type")
+		if v5 != nil {
+			x.Type, ok = v5.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 6;
+		v6 := compiler.MapValueForKey(m, "format")
+		if v6 != nil {
+			x.Format, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 7;
+		v7 := compiler.MapValueForKey(m, "items")
+		if v7 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 8;
+		v8 := compiler.MapValueForKey(m, "collectionFormat")
+		if v8 != nil {
+			x.CollectionFormat, ok = v8.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 9;
+		v9 := compiler.MapValueForKey(m, "default")
+		if v9 != nil {
+			var err error
+			x.Default, err = NewAny(v9, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 10;
+		v10 := compiler.MapValueForKey(m, "maximum")
+		if v10 != nil {
+			switch v10 := v10.(type) {
+			case float64:
+				x.Maximum = v10
+			case float32:
+				x.Maximum = float64(v10)
+			case uint64:
+				x.Maximum = float64(v10)
+			case uint32:
+				x.Maximum = float64(v10)
+			case int64:
+				x.Maximum = float64(v10)
+			case int32:
+				x.Maximum = float64(v10)
+			case int:
+				x.Maximum = float64(v10)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 11;
+		v11 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v11 != nil {
+			x.ExclusiveMaximum, ok = v11.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 12;
+		v12 := compiler.MapValueForKey(m, "minimum")
+		if v12 != nil {
+			switch v12 := v12.(type) {
+			case float64:
+				x.Minimum = v12
+			case float32:
+				x.Minimum = float64(v12)
+			case uint64:
+				x.Minimum = float64(v12)
+			case uint32:
+				x.Minimum = float64(v12)
+			case int64:
+				x.Minimum = float64(v12)
+			case int32:
+				x.Minimum = float64(v12)
+			case int:
+				x.Minimum = float64(v12)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 13;
+		v13 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v13 != nil {
+			x.ExclusiveMinimum, ok = v13.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 14;
+		v14 := compiler.MapValueForKey(m, "maxLength")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 15;
+		v15 := compiler.MapValueForKey(m, "minLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 16;
+		v16 := compiler.MapValueForKey(m, "pattern")
+		if v16 != nil {
+			x.Pattern, ok = v16.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 17;
+		v17 := compiler.MapValueForKey(m, "maxItems")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 18;
+		v18 := compiler.MapValueForKey(m, "minItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 19;
+		v19 := compiler.MapValueForKey(m, "uniqueItems")
+		if v19 != nil {
+			x.UniqueItems, ok = v19.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 21;
+		v21 := compiler.MapValueForKey(m, "multipleOf")
+		if v21 != nil {
+			switch v21 := v21.(type) {
+			case float64:
+				x.MultipleOf = v21
+			case float32:
+				x.MultipleOf = float64(v21)
+			case uint64:
+				x.MultipleOf = float64(v21)
+			case uint32:
+				x.MultipleOf = float64(v21)
+			case int64:
+				x.MultipleOf = float64(v21)
+			case int32:
+				x.MultipleOf = float64(v21)
+			case int:
+				x.MultipleOf = float64(v21)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 22;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPaths creates an object of type Paths if possible, returning an error if not.
+func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) {
+	errors := make([]error, 0)
+	x := &Paths{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern0, pattern1}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedAny vendor_extension = 1;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+		// repeated NamedPathItem path = 2;
+		// MAP: PathItem ^/
+		x.Path = make([]*NamedPathItem, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "/") {
+					pair := &NamedPathItem{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewPathItem(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Path = append(x.Path, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not.
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) {
+	errors := make([]error, 0)
+	x := &PrimitivesItems{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string type = 1;
+		v1 := compiler.MapValueForKey(m, "type")
+		if v1 != nil {
+			x.Type, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number integer boolean array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 3;
+		v3 := compiler.MapValueForKey(m, "items")
+		if v3 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 4;
+		v4 := compiler.MapValueForKey(m, "collectionFormat")
+		if v4 != nil {
+			x.CollectionFormat, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 6;
+		v6 := compiler.MapValueForKey(m, "maximum")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.Maximum = v6
+			case float32:
+				x.Maximum = float64(v6)
+			case uint64:
+				x.Maximum = float64(v6)
+			case uint32:
+				x.Maximum = float64(v6)
+			case int64:
+				x.Maximum = float64(v6)
+			case int32:
+				x.Maximum = float64(v6)
+			case int:
+				x.Maximum = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 7;
+		v7 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v7 != nil {
+			x.ExclusiveMaximum, ok = v7.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 8;
+		v8 := compiler.MapValueForKey(m, "minimum")
+		if v8 != nil {
+			switch v8 := v8.(type) {
+			case float64:
+				x.Minimum = v8
+			case float32:
+				x.Minimum = float64(v8)
+			case uint64:
+				x.Minimum = float64(v8)
+			case uint32:
+				x.Minimum = float64(v8)
+			case int64:
+				x.Minimum = float64(v8)
+			case int32:
+				x.Minimum = float64(v8)
+			case int:
+				x.Minimum = float64(v8)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 9;
+		v9 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v9 != nil {
+			x.ExclusiveMinimum, ok = v9.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 10;
+		v10 := compiler.MapValueForKey(m, "maxLength")
+		if v10 != nil {
+			t, ok := v10.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 11;
+		v11 := compiler.MapValueForKey(m, "minLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 12;
+		v12 := compiler.MapValueForKey(m, "pattern")
+		if v12 != nil {
+			x.Pattern, ok = v12.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 13;
+		v13 := compiler.MapValueForKey(m, "maxItems")
+		if v13 != nil {
+			t, ok := v13.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 14;
+		v14 := compiler.MapValueForKey(m, "minItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 15;
+		v15 := compiler.MapValueForKey(m, "uniqueItems")
+		if v15 != nil {
+			x.UniqueItems, ok = v15.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 16;
+		v16 := compiler.MapValueForKey(m, "enum")
+		if v16 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v16.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 17;
+		v17 := compiler.MapValueForKey(m, "multipleOf")
+		if v17 != nil {
+			switch v17 := v17.(type) {
+			case float64:
+				x.MultipleOf = v17
+			case float32:
+				x.MultipleOf = float64(v17)
+			case uint64:
+				x.MultipleOf = float64(v17)
+			case uint32:
+				x.MultipleOf = float64(v17)
+			case int64:
+				x.MultipleOf = float64(v17)
+			case int32:
+				x.MultipleOf = float64(v17)
+			case int:
+				x.MultipleOf = float64(v17)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 18;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewProperties creates an object of type Properties if possible, returning an error if not.
+func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) {
+	errors := make([]error, 0)
+	x := &Properties{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSchema additional_properties = 1;
+		// MAP: Schema
+		x.AdditionalProperties = make([]*NamedSchema, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSchema{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSchema(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not.
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) {
+	errors := make([]error, 0)
+	x := &QueryParameterSubSchema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// bool required = 1;
+		v1 := compiler.MapValueForKey(m, "required")
+		if v1 != nil {
+			x.Required, ok = v1.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string in = 2;
+		v2 := compiler.MapValueForKey(m, "in")
+		if v2 != nil {
+			x.In, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [query]
+			if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) {
+				message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 3;
+		v3 := compiler.MapValueForKey(m, "description")
+		if v3 != nil {
+			x.Description, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string name = 4;
+		v4 := compiler.MapValueForKey(m, "name")
+		if v4 != nil {
+			x.Name, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool allow_empty_value = 5;
+		v5 := compiler.MapValueForKey(m, "allowEmptyValue")
+		if v5 != nil {
+			x.AllowEmptyValue, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string type = 6;
+		v6 := compiler.MapValueForKey(m, "type")
+		if v6 != nil {
+			x.Type, ok = v6.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [string number boolean integer array]
+			if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) {
+				message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 7;
+		v7 := compiler.MapValueForKey(m, "format")
+		if v7 != nil {
+			x.Format, ok = v7.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// PrimitivesItems items = 8;
+		v8 := compiler.MapValueForKey(m, "items")
+		if v8 != nil {
+			var err error
+			x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string collection_format = 9;
+		v9 := compiler.MapValueForKey(m, "collectionFormat")
+		if v9 != nil {
+			x.CollectionFormat, ok = v9.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+			// check for valid enum values
+			// [csv ssv tsv pipes multi]
+			if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) {
+				message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 10;
+		v10 := compiler.MapValueForKey(m, "default")
+		if v10 != nil {
+			var err error
+			x.Default, err = NewAny(v10, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float maximum = 11;
+		v11 := compiler.MapValueForKey(m, "maximum")
+		if v11 != nil {
+			switch v11 := v11.(type) {
+			case float64:
+				x.Maximum = v11
+			case float32:
+				x.Maximum = float64(v11)
+			case uint64:
+				x.Maximum = float64(v11)
+			case uint32:
+				x.Maximum = float64(v11)
+			case int64:
+				x.Maximum = float64(v11)
+			case int32:
+				x.Maximum = float64(v11)
+			case int:
+				x.Maximum = float64(v11)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 12;
+		v12 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v12 != nil {
+			x.ExclusiveMaximum, ok = v12.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 13;
+		v13 := compiler.MapValueForKey(m, "minimum")
+		if v13 != nil {
+			switch v13 := v13.(type) {
+			case float64:
+				x.Minimum = v13
+			case float32:
+				x.Minimum = float64(v13)
+			case uint64:
+				x.Minimum = float64(v13)
+			case uint32:
+				x.Minimum = float64(v13)
+			case int64:
+				x.Minimum = float64(v13)
+			case int32:
+				x.Minimum = float64(v13)
+			case int:
+				x.Minimum = float64(v13)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 14;
+		v14 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v14 != nil {
+			x.ExclusiveMinimum, ok = v14.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 15;
+		v15 := compiler.MapValueForKey(m, "maxLength")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 16;
+		v16 := compiler.MapValueForKey(m, "minLength")
+		if v16 != nil {
+			t, ok := v16.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 17;
+		v17 := compiler.MapValueForKey(m, "pattern")
+		if v17 != nil {
+			x.Pattern, ok = v17.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 18;
+		v18 := compiler.MapValueForKey(m, "maxItems")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 19;
+		v19 := compiler.MapValueForKey(m, "minItems")
+		if v19 != nil {
+			t, ok := v19.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 20;
+		v20 := compiler.MapValueForKey(m, "uniqueItems")
+		if v20 != nil {
+			x.UniqueItems, ok = v20.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 21;
+		v21 := compiler.MapValueForKey(m, "enum")
+		if v21 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v21.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// float multiple_of = 22;
+		v22 := compiler.MapValueForKey(m, "multipleOf")
+		if v22 != nil {
+			switch v22 := v22.(type) {
+			case float64:
+				x.MultipleOf = v22
+			case float32:
+				x.MultipleOf = float64(v22)
+			case uint64:
+				x.MultipleOf = float64(v22)
+			case uint32:
+				x.MultipleOf = float64(v22)
+			case int64:
+				x.MultipleOf = float64(v22)
+			case int32:
+				x.MultipleOf = float64(v22)
+			case int:
+				x.MultipleOf = float64(v22)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 23;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponse creates an object of type Response if possible, returning an error if not.
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) {
+	errors := make([]error, 0)
+	x := &Response{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"description"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "examples", "headers", "schema"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string description = 1;
+		v1 := compiler.MapValueForKey(m, "description")
+		if v1 != nil {
+			x.Description, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// SchemaItem schema = 2;
+		v2 := compiler.MapValueForKey(m, "schema")
+		if v2 != nil {
+			var err error
+			x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Headers headers = 3;
+		v3 := compiler.MapValueForKey(m, "headers")
+		if v3 != nil {
+			var err error
+			x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Examples examples = 4;
+		v4 := compiler.MapValueForKey(m, "examples")
+		if v4 != nil {
+			var err error
+			x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 5;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not.
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) {
+	errors := make([]error, 0)
+	x := &ResponseDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedResponse additional_properties = 1;
+		// MAP: Response
+		x.AdditionalProperties = make([]*NamedResponse, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedResponse{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewResponse(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not.
+func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) {
+	errors := make([]error, 0)
+	x := &ResponseValue{}
+	matched := false
+	// Response response = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewResponse(m, compiler.NewContext("response", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_Response{Response: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// JsonReference json_reference = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context))
+			if matchingError == nil {
+				x.Oneof = &ResponseValue_JsonReference{JsonReference: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
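+// Illustration (editorial note, not part of the generated code): under a
+// Responses map, an inline value such as
+//
+//	description: OK
+//
+// validates as a Response and fills the Response branch of the oneof above,
+// while a value like
+//
+//	$ref: '#/responses/NotFound'
+//
+// fails Response validation (unknown "$ref" key, missing "description") and is
+// left to the JsonReference branch; once either branch matches, the mismatch
+// errors collected while trying the other are discarded.
+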
+// NewResponses creates an object of type Responses if possible, returning an error if not.
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) {
+	errors := make([]error, 0)
+	x := &Responses{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{}
+		allowedPatterns := []*regexp.Regexp{pattern2, pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// repeated NamedResponseValue response_code = 1;
+		// MAP: ResponseValue ^([0-9]{3})$|^(default)$
+		x.ResponseCode = make([]*NamedResponseValue, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if pattern2.MatchString(k) {
+					pair := &NamedResponseValue{}
+					pair.Name = k
+					var err error
+					pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.ResponseCode = append(x.ResponseCode, pair)
+				}
+			}
+		}
+		// repeated NamedAny vendor_extension = 2;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSchema creates an object of type Schema if possible, returning an error if not.
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) {
+	errors := make([]error, 0)
+	x := &Schema{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string _ref = 1;
+		v1 := compiler.MapValueForKey(m, "$ref")
+		if v1 != nil {
+			x.XRef, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string format = 2;
+		v2 := compiler.MapValueForKey(m, "format")
+		if v2 != nil {
+			x.Format, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string title = 3;
+		v3 := compiler.MapValueForKey(m, "title")
+		if v3 != nil {
+			x.Title, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 4;
+		v4 := compiler.MapValueForKey(m, "description")
+		if v4 != nil {
+			x.Description, ok = v4.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Any default = 5;
+		v5 := compiler.MapValueForKey(m, "default")
+		if v5 != nil {
+			var err error
+			x.Default, err = NewAny(v5, compiler.NewContext("default", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// float multiple_of = 6;
+		v6 := compiler.MapValueForKey(m, "multipleOf")
+		if v6 != nil {
+			switch v6 := v6.(type) {
+			case float64:
+				x.MultipleOf = v6
+			case float32:
+				x.MultipleOf = float64(v6)
+			case uint64:
+				x.MultipleOf = float64(v6)
+			case uint32:
+				x.MultipleOf = float64(v6)
+			case int64:
+				x.MultipleOf = float64(v6)
+			case int32:
+				x.MultipleOf = float64(v6)
+			case int:
+				x.MultipleOf = float64(v6)
+			default:
+				message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float maximum = 7;
+		v7 := compiler.MapValueForKey(m, "maximum")
+		if v7 != nil {
+			switch v7 := v7.(type) {
+			case float64:
+				x.Maximum = v7
+			case float32:
+				x.Maximum = float64(v7)
+			case uint64:
+				x.Maximum = float64(v7)
+			case uint32:
+				x.Maximum = float64(v7)
+			case int64:
+				x.Maximum = float64(v7)
+			case int32:
+				x.Maximum = float64(v7)
+			case int:
+				x.Maximum = float64(v7)
+			default:
+				message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_maximum = 8;
+		v8 := compiler.MapValueForKey(m, "exclusiveMaximum")
+		if v8 != nil {
+			x.ExclusiveMaximum, ok = v8.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// float minimum = 9;
+		v9 := compiler.MapValueForKey(m, "minimum")
+		if v9 != nil {
+			switch v9 := v9.(type) {
+			case float64:
+				x.Minimum = v9
+			case float32:
+				x.Minimum = float64(v9)
+			case uint64:
+				x.Minimum = float64(v9)
+			case uint32:
+				x.Minimum = float64(v9)
+			case int64:
+				x.Minimum = float64(v9)
+			case int32:
+				x.Minimum = float64(v9)
+			case int:
+				x.Minimum = float64(v9)
+			default:
+				message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool exclusive_minimum = 10;
+		v10 := compiler.MapValueForKey(m, "exclusiveMinimum")
+		if v10 != nil {
+			x.ExclusiveMinimum, ok = v10.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_length = 11;
+		v11 := compiler.MapValueForKey(m, "maxLength")
+		if v11 != nil {
+			t, ok := v11.(int)
+			if ok {
+				x.MaxLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_length = 12;
+		v12 := compiler.MapValueForKey(m, "minLength")
+		if v12 != nil {
+			t, ok := v12.(int)
+			if ok {
+				x.MinLength = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string pattern = 13;
+		v13 := compiler.MapValueForKey(m, "pattern")
+		if v13 != nil {
+			x.Pattern, ok = v13.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_items = 14;
+		v14 := compiler.MapValueForKey(m, "maxItems")
+		if v14 != nil {
+			t, ok := v14.(int)
+			if ok {
+				x.MaxItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_items = 15;
+		v15 := compiler.MapValueForKey(m, "minItems")
+		if v15 != nil {
+			t, ok := v15.(int)
+			if ok {
+				x.MinItems = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool unique_items = 16;
+		v16 := compiler.MapValueForKey(m, "uniqueItems")
+		if v16 != nil {
+			x.UniqueItems, ok = v16.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 max_properties = 17;
+		v17 := compiler.MapValueForKey(m, "maxProperties")
+		if v17 != nil {
+			t, ok := v17.(int)
+			if ok {
+				x.MaxProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// int64 min_properties = 18;
+		v18 := compiler.MapValueForKey(m, "minProperties")
+		if v18 != nil {
+			t, ok := v18.(int)
+			if ok {
+				x.MinProperties = int64(t)
+			} else {
+				message := fmt.Sprintf("has unexpected value for minProperties: %+v (%T)", v18, v18)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated string required = 19;
+		v19 := compiler.MapValueForKey(m, "required")
+		if v19 != nil {
+			v, ok := v19.([]interface{})
+			if ok {
+				x.Required = compiler.ConvertInterfaceArrayToStringArray(v)
+			} else {
+				message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated Any enum = 20;
+		v20 := compiler.MapValueForKey(m, "enum")
+		if v20 != nil {
+			// repeated Any
+			x.Enum = make([]*Any, 0)
+			a, ok := v20.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewAny(item, compiler.NewContext("enum", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.Enum = append(x.Enum, y)
+				}
+			}
+		}
+		// AdditionalPropertiesItem additional_properties = 21;
+		v21 := compiler.MapValueForKey(m, "additionalProperties")
+		if v21 != nil {
+			var err error
+			x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// TypeItem type = 22;
+		v22 := compiler.MapValueForKey(m, "type")
+		if v22 != nil {
+			var err error
+			x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ItemsItem items = 23;
+		v23 := compiler.MapValueForKey(m, "items")
+		if v23 != nil {
+			var err error
+			x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated Schema all_of = 24;
+		v24 := compiler.MapValueForKey(m, "allOf")
+		if v24 != nil {
+			// repeated Schema
+			x.AllOf = make([]*Schema, 0)
+			a, ok := v24.([]interface{})
+			if ok {
+				for _, item := range a {
+					y, err := NewSchema(item, compiler.NewContext("allOf", context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+					x.AllOf = append(x.AllOf, y)
+				}
+			}
+		}
+		// Properties properties = 25;
+		v25 := compiler.MapValueForKey(m, "properties")
+		if v25 != nil {
+			var err error
+			x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// string discriminator = 26;
+		v26 := compiler.MapValueForKey(m, "discriminator")
+		if v26 != nil {
+			x.Discriminator, ok = v26.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool read_only = 27;
+		v27 := compiler.MapValueForKey(m, "readOnly")
+		if v27 != nil {
+			x.ReadOnly, ok = v27.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// Xml xml = 28;
+		v28 := compiler.MapValueForKey(m, "xml")
+		if v28 != nil {
+			var err error
+			x.Xml, err = NewXml(v28, compiler.NewContext("xml", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// ExternalDocs external_docs = 29;
+		v29 := compiler.MapValueForKey(m, "externalDocs")
+		if v29 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// Any example = 30;
+		v30 := compiler.MapValueForKey(m, "example")
+		if v30 != nil {
+			var err error
+			x.Example, err = NewAny(v30, compiler.NewContext("example", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 31;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
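+// Editorial note (not part of the generated code): NewSchema above accepts the
+// keys listed in allowedKeys plus any "x-" vendor extension, and it normalizes
+// the numeric bounds. A YAML parser may hand back multipleOf, maximum, and
+// minimum as int, int32, int64, uint32, uint64, float32, or float64 depending
+// on how the literal was written (for example `maximum: 10` versus
+// `maximum: 10.0`), so each of those switch statements converts the value to
+// float64 before storing it on the Schema.
+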
+// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not.
+func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) {
+	errors := make([]error, 0)
+	x := &SchemaItem{}
+	matched := false
+	// Schema schema = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewSchema(m, compiler.NewContext("schema", context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_Schema{Schema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// FileSchema file_schema = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context))
+			if matchingError == nil {
+				x.Oneof = &SchemaItem_FileSchema{FileSchema: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not.
+func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitions{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedSecurityDefinitionsItem additional_properties = 1;
+		// MAP: SecurityDefinitionsItem
+		x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedSecurityDefinitionsItem{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not.
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) {
+	errors := make([]error, 0)
+	x := &SecurityDefinitionsItem{}
+	matched := false
+	// BasicAuthenticationSecurity basic_authentication_security = 1;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// ApiKeySecurity api_key_security = 2;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2PasswordSecurity oauth2_password_security = 4;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2ApplicationSecurity oauth2_application_security = 5;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	// Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+	{
+		m, ok := compiler.UnpackMap(in)
+		if ok {
+			// errors might be ok here; they mean we just don't have the right subtype
+			t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context))
+			if matchingError == nil {
+				x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t}
+				matched = true
+			} else {
+				errors = append(errors, matchingError)
+			}
+		}
+	}
+	if matched {
+		// since the oneof matched one of its possibilities, discard any matching errors
+		errors = make([]error, 0)
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
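+// Illustration (editorial note, not part of the generated code): a
+// securityDefinitions entry is tried against each constructor above in turn,
+// so a typical API-key definition such as
+//
+//	type: apiKey
+//	name: X-API-Key
+//	in: header
+//
+// would be expected to satisfy only NewApiKeySecurity, while `type: basic`
+// maps to NewBasicAuthenticationSecurity; whichever constructor accepts the
+// input fills the oneof, and the mismatch errors from the other attempts are
+// then discarded.
+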
+// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not.
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) {
+	errors := make([]error, 0)
+	x := &SecurityRequirement{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedStringArray additional_properties = 1;
+		// MAP: StringArray
+		x.AdditionalProperties = make([]*NamedStringArray, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedStringArray{}
+				pair.Name = k
+				var err error
+				pair.Value, err = NewStringArray(v, compiler.NewContext(k, context))
+				if err != nil {
+					errors = append(errors, err)
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewStringArray creates an object of type StringArray if possible, returning an error if not.
+func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) {
+	errors := make([]error, 0)
+	x := &StringArray{}
+	a, ok := in.([]interface{})
+	if !ok {
+		message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		x.Value = make([]string, 0)
+		for _, s := range a {
+			if value, ok := s.(string); ok {
+				x.Value = append(x.Value, value)
+			} else {
+				errors = append(errors, compiler.NewError(context, fmt.Sprintf("has unexpected value for StringArray element: %+v (%T)", s, s)))
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTag creates an object of type Tag if possible, returning an error if not.
+func NewTag(in interface{}, context *compiler.Context) (*Tag, error) {
+	errors := make([]error, 0)
+	x := &Tag{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		requiredKeys := []string{"name"}
+		missingKeys := compiler.MissingKeysInMap(m, requiredKeys)
+		if len(missingKeys) > 0 {
+			message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		allowedKeys := []string{"description", "externalDocs", "name"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string description = 2;
+		v2 := compiler.MapValueForKey(m, "description")
+		if v2 != nil {
+			x.Description, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// ExternalDocs external_docs = 3;
+		v3 := compiler.MapValueForKey(m, "externalDocs")
+		if v3 != nil {
+			var err error
+			x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context))
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+		// repeated NamedAny vendor_extension = 4;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewTypeItem creates an object of type TypeItem if possible, returning an error if not.
+func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) {
+	errors := make([]error, 0)
+	x := &TypeItem{}
+	switch in := in.(type) {
+	case string:
+		x.Value = make([]string, 0)
+		x.Value = append(x.Value, in)
+	case []interface{}:
+		x.Value = make([]string, 0)
+		for _, v := range in {
+			value, ok := v.(string)
+			if ok {
+				x.Value = append(x.Value, value)
+			} else {
+				message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", v, v)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+	default:
+		message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
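+// Illustration (editorial note, not part of the generated code): NewTypeItem
+// accepts either a single scalar or a list, so `type: string` produces
+// Value == []string{"string"} and `type: [string, number]` produces
+// Value == []string{"string", "number"}; a non-string element in the list is
+// reported as an error rather than stored.
+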
+// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not.
+func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) {
+	errors := make([]error, 0)
+	x := &VendorExtension{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		// repeated NamedAny additional_properties = 1;
+		// MAP: Any
+		x.AdditionalProperties = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				pair := &NamedAny{}
+				pair.Name = k
+				result := &Any{}
+				handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+				if handled {
+					if err != nil {
+						errors = append(errors, err)
+					} else {
+						bytes, _ := yaml.Marshal(v)
+						result.Yaml = string(bytes)
+						result.Value = resultFromExt
+						pair.Value = result
+					}
+				} else {
+					pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+					if err != nil {
+						errors = append(errors, err)
+					}
+				}
+				x.AdditionalProperties = append(x.AdditionalProperties, pair)
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
+// NewXml creates an object of type Xml if possible, returning an error if not.
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) {
+	errors := make([]error, 0)
+	x := &Xml{}
+	m, ok := compiler.UnpackMap(in)
+	if !ok {
+		message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in)
+		errors = append(errors, compiler.NewError(context, message))
+	} else {
+		allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"}
+		allowedPatterns := []*regexp.Regexp{pattern0}
+		invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns)
+		if len(invalidKeys) > 0 {
+			message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", "))
+			errors = append(errors, compiler.NewError(context, message))
+		}
+		// string name = 1;
+		v1 := compiler.MapValueForKey(m, "name")
+		if v1 != nil {
+			x.Name, ok = v1.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string namespace = 2;
+		v2 := compiler.MapValueForKey(m, "namespace")
+		if v2 != nil {
+			x.Namespace, ok = v2.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// string prefix = 3;
+		v3 := compiler.MapValueForKey(m, "prefix")
+		if v3 != nil {
+			x.Prefix, ok = v3.(string)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool attribute = 4;
+		v4 := compiler.MapValueForKey(m, "attribute")
+		if v4 != nil {
+			x.Attribute, ok = v4.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// bool wrapped = 5;
+		v5 := compiler.MapValueForKey(m, "wrapped")
+		if v5 != nil {
+			x.Wrapped, ok = v5.(bool)
+			if !ok {
+				message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5)
+				errors = append(errors, compiler.NewError(context, message))
+			}
+		}
+		// repeated NamedAny vendor_extension = 6;
+		// MAP: Any ^x-
+		x.VendorExtension = make([]*NamedAny, 0)
+		for _, item := range m {
+			k, ok := compiler.StringValue(item.Key)
+			if ok {
+				v := item.Value
+				if strings.HasPrefix(k, "x-") {
+					pair := &NamedAny{}
+					pair.Name = k
+					result := &Any{}
+					handled, resultFromExt, err := compiler.HandleExtension(context, v, k)
+					if handled {
+						if err != nil {
+							errors = append(errors, err)
+						} else {
+							bytes, _ := yaml.Marshal(v)
+							result.Yaml = string(bytes)
+							result.Value = resultFromExt
+							pair.Value = result
+						}
+					} else {
+						pair.Value, err = NewAny(v, compiler.NewContext(k, context))
+						if err != nil {
+							errors = append(errors, err)
+						}
+					}
+					x.VendorExtension = append(x.VendorExtension, pair)
+				}
+			}
+		}
+	}
+	return x, compiler.NewErrorGroupOrNil(errors)
+}
+
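+// Editorial note (not part of the generated code): the ResolveReferences
+// methods below all follow one shape: walk the message's child fields, call
+// ResolveReferences on every non-nil child, and collect the resulting errors
+// into a single error group. Only the reference-bearing types (JsonReference,
+// PathItem, and oneof wrappers such as ParametersItem) actually load the
+// referenced content and splice it into the tree; the rest are pass-through
+// traversals.
+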
+// ResolveReferences resolves references found inside AdditionalPropertiesItem objects.
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Any objects.
+func (m *Any) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ApiKeySecurity objects.
+func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects.
+func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside BodyParameter objects.
+func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Contact objects.
+func (m *Contact) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Default objects.
+func (m *Default) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Definitions objects.
+func (m *Definitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Document objects.
+func (m *Document) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Info != nil {
+		_, err := m.Info.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Paths != nil {
+		_, err := m.Paths.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Definitions != nil {
+		_, err := m.Definitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Parameters != nil {
+		_, err := m.Parameters.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.SecurityDefinitions != nil {
+		_, err := m.SecurityDefinitions.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Tags {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Examples objects.
+func (m *Examples) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ExternalDocs objects.
+func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FileSchema objects.
+func (m *FileSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside FormDataParameterSubSchema objects.
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Header objects.
+func (m *Header) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside HeaderParameterSubSchema objects.
+func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Headers objects.
+func (m *Headers) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Info objects.
+func (m *Info) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Contact != nil {
+		_, err := m.Contact.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.License != nil {
+		_, err := m.License.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ItemsItem objects.
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.Schema {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside JsonReference objects.
+func (m *JsonReference) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewJsonReference(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
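+// Editorial note (not part of the generated code): when a $ref is present, the
+// referenced fragment is fetched with compiler.ReadInfoForRef, parsed into a
+// replacement JsonReference, spliced in via *m = *replacement, and resolved
+// again so that chains of references are followed; if no replacement can be
+// built, the raw fetched info is returned to the caller instead.
+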
+// ResolveReferences resolves references found inside License objects.
+func (m *License) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedAny objects.
+func (m *NamedAny) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedHeader objects.
+func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedParameter objects.
+func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedPathItem objects.
+func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponse objects.
+func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedResponseValue objects.
+func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSchema objects.
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects.
+func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedString objects.
+func (m *NamedString) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NamedStringArray objects.
+func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Value != nil {
+		_, err := m.Value.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside NonBodyParameter objects.
+func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema)
+		if ok {
+			_, err := p.HeaderParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema)
+		if ok {
+			_, err := p.FormDataParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema)
+		if ok {
+			_, err := p.QueryParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema)
+		if ok {
+			_, err := p.PathParameterSubSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects.
+func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects.
+func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects.
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects.
+func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Scopes != nil {
+		_, err := m.Scopes.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Oauth2Scopes objects.
+func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Operation objects.
+func (m *Operation) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Responses != nil {
+		_, err := m.Responses.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Security {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Parameter objects.
+func (m *Parameter) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*Parameter_BodyParameter)
+		if ok {
+			_, err := p.BodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*Parameter_NonBodyParameter)
+		if ok {
+			_, err := p.NonBodyParameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParameterDefinitions objects.
+func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ParametersItem objects.
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ParametersItem_Parameter)
+		if ok {
+			_, err := p.Parameter.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ParametersItem_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewParametersItem(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathItem objects.
+func (m *PathItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewPathItem(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Get != nil {
+		_, err := m.Get.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Put != nil {
+		_, err := m.Put.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Post != nil {
+		_, err := m.Post.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Delete != nil {
+		_, err := m.Delete.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Options != nil {
+		_, err := m.Options.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Head != nil {
+		_, err := m.Head.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Patch != nil {
+		_, err := m.Patch.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Parameters {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PathParameterSubSchema objects.
+func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Paths objects.
+func (m *Paths) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.Path {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside PrimitivesItems objects.
+func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Properties objects.
+func (m *Properties) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside QueryParameterSubSchema objects.
+func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Response objects.
+func (m *Response) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.Schema != nil {
+		_, err := m.Schema.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Headers != nil {
+		_, err := m.Headers.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Examples != nil {
+		_, err := m.Examples.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseDefinitions objects.
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseValue objects.
+func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ResponseValue_Response)
+		if ok {
+			_, err := p.Response.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ResponseValue_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewResponseValue(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Responses objects.
+func (m *Responses) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.ResponseCode {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Schema objects.
+func (m *Schema) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewSchema(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.AdditionalProperties != nil {
+		_, err := m.AdditionalProperties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Type != nil {
+		_, err := m.Type.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.AllOf {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Properties != nil {
+		_, err := m.Properties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Xml != nil {
+		_, err := m.Xml.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SchemaItem objects.
+func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SchemaItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SchemaItem_FileSchema)
+		if ok {
+			_, err := p.FileSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitions objects.
+func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
+		if ok {
+			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
+		if ok {
+			_, err := p.ApiKeySecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
+		if ok {
+			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
+		if ok {
+			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
+		if ok {
+			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
+		if ok {
+			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityRequirement objects.
+func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside StringArray objects.
+func (m *StringArray) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Tag objects.
+func (m *Tag) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside TypeItem objects.
+func (m *TypeItem) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside VendorExtension objects.
+func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Xml objects.
+func (m *Xml) ResolveReferences(root string) (interface{}, error) {
+	errors := make([]error, 0)
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export.
+func (m *AdditionalPropertiesItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// AdditionalPropertiesItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return v1.Boolean
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Any suitable for JSON or YAML export.
+func (m *Any) ToRawInfo() interface{} {
+	var err error
+	var info1 []yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info1)
+	if err == nil {
+		return info1
+	}
+	var info2 yaml.MapSlice
+	err = yaml.Unmarshal([]byte(m.Yaml), &info2)
+	if err == nil {
+		return info2
+	}
+	var info3 interface{}
+	err = yaml.Unmarshal([]byte(m.Yaml), &info3)
+	if err == nil {
+		return info3
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export.
+func (m *ApiKeySecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export.
+func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export.
+func (m *BodyParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Contact suitable for JSON or YAML export.
+func (m *Contact) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	}
+	if m.Email != "" {
+		info = append(info, yaml.MapItem{Key: "email", Value: m.Email})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Default suitable for JSON or YAML export.
+func (m *Default) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Definitions suitable for JSON or YAML export.
+func (m *Definitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Document suitable for JSON or YAML export.
+func (m *Document) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()})
+	// &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Host != "" {
+		info = append(info, yaml.MapItem{Key: "host", Value: m.Host})
+	}
+	if m.BasePath != "" {
+		info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath})
+	}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()})
+	// &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Definitions != nil {
+		info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()})
+	}
+	// &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Parameters != nil {
+		info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()})
+	}
+	// &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Responses != nil {
+		info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
+	}
+	// &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "security", Value: items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.SecurityDefinitions != nil {
+		info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()})
+	}
+	// &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Tags) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Tags {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "tags", Value: items})
+	}
+	// &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Examples suitable for JSON or YAML export.
+func (m *Examples) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export.
+func (m *ExternalDocs) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export.
+func (m *FileSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export.
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.AllowEmptyValue != false {
+		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Header suitable for JSON or YAML export.
+func (m *Header) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export.
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Headers suitable for JSON or YAML export.
+func (m *Headers) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Info suitable for JSON or YAML export.
+func (m *Info) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "version", Value: m.Version})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.TermsOfService != "" {
+		info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService})
+	}
+	if m.Contact != nil {
+		info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()})
+	}
+	// &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.License != nil {
+		info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()})
+	}
+	// &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export.
+func (m *ItemsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if len(m.Schema) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "schema", Value: items})
+	}
+	// &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	return info
+}
+
+// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export.
+func (m *JsonReference) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of License suitable for JSON or YAML export.
+func (m *License) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	if m.Url != "" {
+		info = append(info, yaml.MapItem{Key: "url", Value: m.Url})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export.
+func (m *NamedAny) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export.
+func (m *NamedHeader) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export.
+func (m *NamedParameter) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export.
+func (m *NamedPathItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
+func (m *NamedResponse) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
+func (m *NamedResponseValue) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
+func (m *NamedSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
+func (m *NamedString) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Value != "" {
+		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
+func (m *NamedStringArray) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	// &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
+	return info
+}
+
+// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export.
+func (m *NonBodyParameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// NonBodyParameter
+	// {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetHeaderParameterSubSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFormDataParameterSubSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetQueryParameterSubSchema()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetPathParameterSubSchema()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export.
+func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export.
+func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export.
+func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export.
+func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow})
+	if m.Scopes != nil {
+		info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()})
+	}
+	// &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export.
+func (m *Oauth2Scopes) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Operation suitable for JSON or YAML export.
+func (m *Operation) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if len(m.Tags) != 0 {
+		info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags})
+	}
+	if m.Summary != "" {
+		info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.OperationId != "" {
+		info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId})
+	}
+	if len(m.Produces) != 0 {
+		info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces})
+	}
+	if len(m.Consumes) != 0 {
+		info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes})
+	}
+	if len(m.Parameters) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Parameters {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+	}
+	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()})
+	// &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Schemes) != 0 {
+		info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes})
+	}
+	if m.Deprecated != false {
+		info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated})
+	}
+	if len(m.Security) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Security {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "security", Value: items})
+	}
+	// &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Parameter suitable for JSON or YAML export.
+func (m *Parameter) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// Parameter
+	// {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBodyParameter()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetNonBodyParameter()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export.
+func (m *ParameterDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export.
+func (m *ParametersItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// ParametersItem
+	// {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetParameter()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetJsonReference()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of PathItem suitable for JSON or YAML export.
+func (m *PathItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Get != nil {
+		info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()})
+	}
+	// &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Put != nil {
+		info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()})
+	}
+	// &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Post != nil {
+		info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()})
+	}
+	// &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Delete != nil {
+		info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()})
+	}
+	// &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Options != nil {
+		info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()})
+	}
+	// &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Head != nil {
+		info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()})
+	}
+	// &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Patch != nil {
+		info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()})
+	}
+	// &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.Parameters) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Parameters {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "parameters", Value: items})
+	}
+	// &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export.
+func (m *PathParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Paths suitable for JSON or YAML export.
+func (m *Paths) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	if m.Path != nil {
+		for _, item := range m.Path {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export.
+func (m *PrimitivesItems) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Properties suitable for JSON or YAML export.
+func (m *Properties) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export.
+func (m *QueryParameterSubSchema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Required != false {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if m.In != "" {
+		info = append(info, yaml.MapItem{Key: "in", Value: m.In})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.AllowEmptyValue != false {
+		info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue})
+	}
+	if m.Type != "" {
+		info = append(info, yaml.MapItem{Key: "type", Value: m.Type})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Items != nil {
+		info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()})
+	}
+	// &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.CollectionFormat != "" {
+		info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Response suitable for JSON or YAML export.
+func (m *Response) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	if m.Schema != nil {
+		info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()})
+	}
+	// &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Headers != nil {
+		info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()})
+	}
+	// &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Examples != nil {
+		info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()})
+	}
+	// &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export.
+func (m *ResponseDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export.
+func (m *ResponseValue) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// ResponseValue
+	// {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetResponse()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetJsonReference()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of Responses suitable for JSON or YAML export.
+func (m *Responses) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.ResponseCode != nil {
+		for _, item := range m.ResponseCode {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Schema suitable for JSON or YAML export.
+func (m *Schema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if m.MaxProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
+	}
+	if m.MinProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
+	}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.AdditionalProperties != nil {
+		info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
+	}
+	// &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Type != nil {
+		if len(m.Type.Value) == 1 {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
+		} else {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
+		}
+	}
+	// &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Items != nil {
+		items := make([]interface{}, 0)
+		for _, item := range m.Items.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
+	}
+	// &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.AllOf) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.AllOf {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "allOf", Value: items})
+	}
+	// &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.Properties != nil {
+		info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
+	}
+	// &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Discriminator != "" {
+		info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
+	}
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+	}
+	if m.Xml != nil {
+		info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
+	}
+	// &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
+func (m *SchemaItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// SchemaItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFileSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
+func (m *SecurityDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// SecurityDefinitionsItem
+	// {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetBasicAuthenticationSecurity()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetApiKeySecurity()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	// {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v2 := m.GetOauth2ImplicitSecurity()
+	if v2 != nil {
+		return v2.ToRawInfo()
+	}
+	// {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v3 := m.GetOauth2PasswordSecurity()
+	if v3 != nil {
+		return v3.ToRawInfo()
+	}
+	// {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v4 := m.GetOauth2ApplicationSecurity()
+	if v4 != nil {
+		return v4.ToRawInfo()
+	}
+	// {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v5 := m.GetOauth2AccessCodeSecurity()
+	if v5 != nil {
+		return v5.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export.
+func (m *SecurityRequirement) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of StringArray suitable for JSON or YAML export.
+func (m *StringArray) ToRawInfo() interface{} {
+	return m.Value
+}
+
+// ToRawInfo returns a description of Tag suitable for JSON or YAML export.
+func (m *Tag) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	// always include this required field.
+	info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export.
+func (m *TypeItem) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if len(m.Value) != 0 {
+		info = append(info, yaml.MapItem{Key: "value", Value: m.Value})
+	}
+	return info
+}
+
+// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export.
+func (m *VendorExtension) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of Xml suitable for JSON or YAML export.
+func (m *Xml) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.Name != "" {
+		info = append(info, yaml.MapItem{Key: "name", Value: m.Name})
+	}
+	if m.Namespace != "" {
+		info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace})
+	}
+	if m.Prefix != "" {
+		info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix})
+	}
+	if m.Attribute != false {
+		info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute})
+	}
+	if m.Wrapped != false {
+		info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped})
+	}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+var (
+	pattern0 = regexp.MustCompile("^x-")
+	pattern1 = regexp.MustCompile("^/")
+	pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$")
+)
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
new file mode 100644
index 0000000..a030fa6
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go
@@ -0,0 +1,4455 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: OpenAPIv2/OpenAPIv2.proto
+
+/*
+Package openapi_v2 is a generated protocol buffer package.
+
+It is generated from these files:
+	OpenAPIv2/OpenAPIv2.proto
+
+It has these top-level messages:
+	AdditionalPropertiesItem
+	Any
+	ApiKeySecurity
+	BasicAuthenticationSecurity
+	BodyParameter
+	Contact
+	Default
+	Definitions
+	Document
+	Examples
+	ExternalDocs
+	FileSchema
+	FormDataParameterSubSchema
+	Header
+	HeaderParameterSubSchema
+	Headers
+	Info
+	ItemsItem
+	JsonReference
+	License
+	NamedAny
+	NamedHeader
+	NamedParameter
+	NamedPathItem
+	NamedResponse
+	NamedResponseValue
+	NamedSchema
+	NamedSecurityDefinitionsItem
+	NamedString
+	NamedStringArray
+	NonBodyParameter
+	Oauth2AccessCodeSecurity
+	Oauth2ApplicationSecurity
+	Oauth2ImplicitSecurity
+	Oauth2PasswordSecurity
+	Oauth2Scopes
+	Operation
+	Parameter
+	ParameterDefinitions
+	ParametersItem
+	PathItem
+	PathParameterSubSchema
+	Paths
+	PrimitivesItems
+	Properties
+	QueryParameterSubSchema
+	Response
+	ResponseDefinitions
+	ResponseValue
+	Responses
+	Schema
+	SchemaItem
+	SecurityDefinitions
+	SecurityDefinitionsItem
+	SecurityRequirement
+	StringArray
+	Tag
+	TypeItem
+	VendorExtension
+	Xml
+*/
+package openapi_v2
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type AdditionalPropertiesItem struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*AdditionalPropertiesItem_Schema
+	//	*AdditionalPropertiesItem_Boolean
+	Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *AdditionalPropertiesItem) Reset()                    { *m = AdditionalPropertiesItem{} }
+func (m *AdditionalPropertiesItem) String() string            { return proto.CompactTextString(m) }
+func (*AdditionalPropertiesItem) ProtoMessage()               {}
+func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+type isAdditionalPropertiesItem_Oneof interface {
+	isAdditionalPropertiesItem_Oneof()
+}
+
+type AdditionalPropertiesItem_Schema struct {
+	Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"`
+}
+type AdditionalPropertiesItem_Boolean struct {
+	Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"`
+}
+
+func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof()  {}
+func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {}
+
+func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *AdditionalPropertiesItem) GetSchema() *Schema {
+	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok {
+		return x.Schema
+	}
+	return nil
+}
+
+func (m *AdditionalPropertiesItem) GetBoolean() bool {
+	if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok {
+		return x.Boolean
+	}
+	return false
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{
+		(*AdditionalPropertiesItem_Schema)(nil),
+		(*AdditionalPropertiesItem_Boolean)(nil),
+	}
+}
+
+func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*AdditionalPropertiesItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *AdditionalPropertiesItem_Schema:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Schema); err != nil {
+			return err
+		}
+	case *AdditionalPropertiesItem_Boolean:
+		t := uint64(0)
+		if x.Boolean {
+			t = 1
+		}
+		b.EncodeVarint(2<<3 | proto.WireVarint)
+		b.EncodeVarint(t)
+	case nil:
+	default:
+		return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*AdditionalPropertiesItem)
+	switch tag {
+	case 1: // oneof.schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Schema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &AdditionalPropertiesItem_Schema{msg}
+		return true, err
+	case 2: // oneof.boolean
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*AdditionalPropertiesItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *AdditionalPropertiesItem_Schema:
+		s := proto.Size(x.Schema)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *AdditionalPropertiesItem_Boolean:
+		n += proto.SizeVarint(2<<3 | proto.WireVarint)
+		n += 1
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type Any struct {
+	Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"`
+	Yaml  string               `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"`
+}
+
+func (m *Any) Reset()                    { *m = Any{} }
+func (m *Any) String() string            { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()               {}
+func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *Any) GetValue() *google_protobuf.Any {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *Any) GetYaml() string {
+	if m != nil {
+		return m.Yaml
+	}
+	return ""
+}
+
+type ApiKeySecurity struct {
+	Type            string      `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Name            string      `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+	In              string      `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"`
+	Description     string      `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *ApiKeySecurity) Reset()                    { *m = ApiKeySecurity{} }
+func (m *ApiKeySecurity) String() string            { return proto.CompactTextString(m) }
+func (*ApiKeySecurity) ProtoMessage()               {}
+func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *ApiKeySecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *ApiKeySecurity) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *ApiKeySecurity) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *ApiKeySecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type BasicAuthenticationSecurity struct {
+	Type            string      `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Description     string      `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *BasicAuthenticationSecurity) Reset()                    { *m = BasicAuthenticationSecurity{} }
+func (m *BasicAuthenticationSecurity) String() string            { return proto.CompactTextString(m) }
+func (*BasicAuthenticationSecurity) ProtoMessage()               {}
+func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *BasicAuthenticationSecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *BasicAuthenticationSecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type BodyParameter struct {
+	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"`
+	// Determines whether or not this parameter is required or optional.
+	Required        bool        `protobuf:"varint,4,opt,name=required" json:"required,omitempty"`
+	Schema          *Schema     `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *BodyParameter) Reset()                    { *m = BodyParameter{} }
+func (m *BodyParameter) String() string            { return proto.CompactTextString(m) }
+func (*BodyParameter) ProtoMessage()               {}
+func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *BodyParameter) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *BodyParameter) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *BodyParameter) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *BodyParameter) GetRequired() bool {
+	if m != nil {
+		return m.Required
+	}
+	return false
+}
+
+func (m *BodyParameter) GetSchema() *Schema {
+	if m != nil {
+		return m.Schema
+	}
+	return nil
+}
+
+func (m *BodyParameter) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+// Contact information for the owners of the API.
+type Contact struct {
+	// The identifying name of the contact person/organization.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The URL pointing to the contact information.
+	Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+	// The email address of the contact person/organization.
+	Email           string      `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Contact) Reset()                    { *m = Contact{} }
+func (m *Contact) String() string            { return proto.CompactTextString(m) }
+func (*Contact) ProtoMessage()               {}
+func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *Contact) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Contact) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *Contact) GetEmail() string {
+	if m != nil {
+		return m.Email
+	}
+	return ""
+}
+
+func (m *Contact) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Default struct {
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Default) Reset()                    { *m = Default{} }
+func (m *Default) String() string            { return proto.CompactTextString(m) }
+func (*Default) ProtoMessage()               {}
+func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *Default) GetAdditionalProperties() []*NamedAny {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+// One or more JSON objects describing the schemas being consumed and produced by the API.
+type Definitions struct {
+	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Definitions) Reset()                    { *m = Definitions{} }
+func (m *Definitions) String() string            { return proto.CompactTextString(m) }
+func (*Definitions) ProtoMessage()               {}
+func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *Definitions) GetAdditionalProperties() []*NamedSchema {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type Document struct {
+	// The Swagger version of this document.
+	Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"`
+	Info    *Info  `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
+	// The host (name or ip) of the API. Example: 'swagger.io'
+	Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"`
+	// The base path to the API. Example: '/api'.
+	BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"`
+	// The transfer protocol of the API.
+	Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"`
+	// A list of MIME types accepted by the API.
+	Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"`
+	// A list of MIME types the API can produce.
+	Produces            []string               `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"`
+	Paths               *Paths                 `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"`
+	Definitions         *Definitions           `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"`
+	Parameters          *ParameterDefinitions  `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"`
+	Responses           *ResponseDefinitions   `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"`
+	Security            []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"`
+	SecurityDefinitions *SecurityDefinitions   `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"`
+	Tags                []*Tag                 `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"`
+	ExternalDocs        *ExternalDocs          `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
+	VendorExtension     []*NamedAny            `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Document) Reset()                    { *m = Document{} }
+func (m *Document) String() string            { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage()               {}
+func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+func (m *Document) GetSwagger() string {
+	if m != nil {
+		return m.Swagger
+	}
+	return ""
+}
+
+func (m *Document) GetInfo() *Info {
+	if m != nil {
+		return m.Info
+	}
+	return nil
+}
+
+func (m *Document) GetHost() string {
+	if m != nil {
+		return m.Host
+	}
+	return ""
+}
+
+func (m *Document) GetBasePath() string {
+	if m != nil {
+		return m.BasePath
+	}
+	return ""
+}
+
+func (m *Document) GetSchemes() []string {
+	if m != nil {
+		return m.Schemes
+	}
+	return nil
+}
+
+func (m *Document) GetConsumes() []string {
+	if m != nil {
+		return m.Consumes
+	}
+	return nil
+}
+
+func (m *Document) GetProduces() []string {
+	if m != nil {
+		return m.Produces
+	}
+	return nil
+}
+
+func (m *Document) GetPaths() *Paths {
+	if m != nil {
+		return m.Paths
+	}
+	return nil
+}
+
+func (m *Document) GetDefinitions() *Definitions {
+	if m != nil {
+		return m.Definitions
+	}
+	return nil
+}
+
+func (m *Document) GetParameters() *ParameterDefinitions {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *Document) GetResponses() *ResponseDefinitions {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+func (m *Document) GetSecurity() []*SecurityRequirement {
+	if m != nil {
+		return m.Security
+	}
+	return nil
+}
+
+func (m *Document) GetSecurityDefinitions() *SecurityDefinitions {
+	if m != nil {
+		return m.SecurityDefinitions
+	}
+	return nil
+}
+
+func (m *Document) GetTags() []*Tag {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
+func (m *Document) GetExternalDocs() *ExternalDocs {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Document) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Examples struct {
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Examples) Reset()                    { *m = Examples{} }
+func (m *Examples) String() string            { return proto.CompactTextString(m) }
+func (*Examples) ProtoMessage()               {}
+func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+func (m *Examples) GetAdditionalProperties() []*NamedAny {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+// information about external documentation
+type ExternalDocs struct {
+	Description     string      `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
+	Url             string      `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *ExternalDocs) Reset()                    { *m = ExternalDocs{} }
+func (m *ExternalDocs) String() string            { return proto.CompactTextString(m) }
+func (*ExternalDocs) ProtoMessage()               {}
+func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+func (m *ExternalDocs) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *ExternalDocs) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *ExternalDocs) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+// A deterministic version of a JSON Schema object.
+type FileSchema struct {
+	Format          string        `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"`
+	Title           string        `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"`
+	Description     string        `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	Default         *Any          `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"`
+	Required        []string      `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"`
+	Type            string        `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
+	ReadOnly        bool          `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
+	ExternalDocs    *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
+	Example         *Any          `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"`
+	VendorExtension []*NamedAny   `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *FileSchema) Reset()                    { *m = FileSchema{} }
+func (m *FileSchema) String() string            { return proto.CompactTextString(m) }
+func (*FileSchema) ProtoMessage()               {}
+func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+func (m *FileSchema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *FileSchema) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *FileSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *FileSchema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *FileSchema) GetRequired() []string {
+	if m != nil {
+		return m.Required
+	}
+	return nil
+}
+
+func (m *FileSchema) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *FileSchema) GetReadOnly() bool {
+	if m != nil {
+		return m.ReadOnly
+	}
+	return false
+}
+
+func (m *FileSchema) GetExternalDocs() *ExternalDocs {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *FileSchema) GetExample() *Any {
+	if m != nil {
+		return m.Example
+	}
+	return nil
+}
+
+func (m *FileSchema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type FormDataParameterSubSchema struct {
+	// Determines whether or not this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	// allows sending a parameter by name only or with an empty value.
+	AllowEmptyValue  bool             `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"`
+	Type             string           `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *FormDataParameterSubSchema) Reset()                    { *m = FormDataParameterSubSchema{} }
+func (m *FormDataParameterSubSchema) String() string            { return proto.CompactTextString(m) }
+func (*FormDataParameterSubSchema) ProtoMessage()               {}
+func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+func (m *FormDataParameterSubSchema) GetRequired() bool {
+	if m != nil {
+		return m.Required
+	}
+	return false
+}
+
+func (m *FormDataParameterSubSchema) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool {
+	if m != nil {
+		return m.AllowEmptyValue
+	}
+	return false
+}
+
+func (m *FormDataParameterSubSchema) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *FormDataParameterSubSchema) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *FormDataParameterSubSchema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *FormDataParameterSubSchema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *FormDataParameterSubSchema) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *FormDataParameterSubSchema) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *FormDataParameterSubSchema) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *FormDataParameterSubSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Header struct {
+	Type             string           `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	Description      string           `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Header) Reset()                    { *m = Header{} }
+func (m *Header) String() string            { return proto.CompactTextString(m) }
+func (*Header) ProtoMessage()               {}
+func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+func (m *Header) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Header) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *Header) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *Header) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *Header) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *Header) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *Header) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *Header) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *Header) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *Header) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *Header) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *Header) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *Header) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *Header) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *Header) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *Header) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *Header) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *Header) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Header) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type HeaderParameterSubSchema struct {
+	// Determines whether this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The name of the parameter.
+	Name             string           `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	Type             string           `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *HeaderParameterSubSchema) Reset()                    { *m = HeaderParameterSubSchema{} }
+func (m *HeaderParameterSubSchema) String() string            { return proto.CompactTextString(m) }
+func (*HeaderParameterSubSchema) ProtoMessage()               {}
+func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+func (m *HeaderParameterSubSchema) GetRequired() bool {
+	if m != nil {
+		return m.Required
+	}
+	return false
+}
+
+func (m *HeaderParameterSubSchema) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *HeaderParameterSubSchema) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *HeaderParameterSubSchema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *HeaderParameterSubSchema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *HeaderParameterSubSchema) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *HeaderParameterSubSchema) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *HeaderParameterSubSchema) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *HeaderParameterSubSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Headers struct {
+	AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Headers) Reset()                    { *m = Headers{} }
+func (m *Headers) String() string            { return proto.CompactTextString(m) }
+func (*Headers) ProtoMessage()               {}
+func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+func (m *Headers) GetAdditionalProperties() []*NamedHeader {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+// General information about the API.
+type Info struct {
+	// A unique and precise title of the API.
+	Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"`
+	// A semantic version number of the API.
+	Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+	// A longer description of the API. It should be different from the title. GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The terms of service for the API.
+	TermsOfService  string      `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"`
+	Contact         *Contact    `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"`
+	License         *License    `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Info) Reset()                    { *m = Info{} }
+func (m *Info) String() string            { return proto.CompactTextString(m) }
+func (*Info) ProtoMessage()               {}
+func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+func (m *Info) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *Info) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Info) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Info) GetTermsOfService() string {
+	if m != nil {
+		return m.TermsOfService
+	}
+	return ""
+}
+
+func (m *Info) GetContact() *Contact {
+	if m != nil {
+		return m.Contact
+	}
+	return nil
+}
+
+func (m *Info) GetLicense() *License {
+	if m != nil {
+		return m.License
+	}
+	return nil
+}
+
+func (m *Info) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type ItemsItem struct {
+	Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"`
+}
+
+func (m *ItemsItem) Reset()                    { *m = ItemsItem{} }
+func (m *ItemsItem) String() string            { return proto.CompactTextString(m) }
+func (*ItemsItem) ProtoMessage()               {}
+func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *ItemsItem) GetSchema() []*Schema {
+	if m != nil {
+		return m.Schema
+	}
+	return nil
+}
+
+type JsonReference struct {
+	XRef        string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
+	Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+}
+
+func (m *JsonReference) Reset()                    { *m = JsonReference{} }
+func (m *JsonReference) String() string            { return proto.CompactTextString(m) }
+func (*JsonReference) ProtoMessage()               {}
+func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *JsonReference) GetXRef() string {
+	if m != nil {
+		return m.XRef
+	}
+	return ""
+}
+
+func (m *JsonReference) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+type License struct {
+	// The name of the license type. It's encouraged to use an OSI-compatible license.
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// The URL pointing to the license.
+	Url             string      `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *License) Reset()                    { *m = License{} }
+func (m *License) String() string            { return proto.CompactTextString(m) }
+func (*License) ProtoMessage()               {}
+func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *License) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *License) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *License) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
+type NamedAny struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedAny) Reset()                    { *m = NamedAny{} }
+func (m *NamedAny) String() string            { return proto.CompactTextString(m) }
+func (*NamedAny) ProtoMessage()               {}
+func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
+
+func (m *NamedAny) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedAny) GetValue() *Any {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
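+// The NamedAny pairs above represent an ordered map. The helper below is an
+// illustrative sketch rather than generated code: namedAnyToMap is a
+// hypothetical name, and it assumes a later duplicate of a key should win.
+func namedAnyToMap(pairs []*NamedAny) map[string]*Any {
+	result := make(map[string]*Any, len(pairs))
+	for _, pair := range pairs {
+		if pair == nil {
+			continue
+		}
+		// GetName and GetValue are nil-safe on the generated types.
+		result[pair.GetName()] = pair.GetValue()
+	}
+	return result
+}
+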
+// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
+type NamedHeader struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedHeader) Reset()                    { *m = NamedHeader{} }
+func (m *NamedHeader) String() string            { return proto.CompactTextString(m) }
+func (*NamedHeader) ProtoMessage()               {}
+func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
+
+func (m *NamedHeader) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedHeader) GetValue() *Header {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
+type NamedParameter struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedParameter) Reset()                    { *m = NamedParameter{} }
+func (m *NamedParameter) String() string            { return proto.CompactTextString(m) }
+func (*NamedParameter) ProtoMessage()               {}
+func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
+
+func (m *NamedParameter) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedParameter) GetValue() *Parameter {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
+type NamedPathItem struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedPathItem) Reset()                    { *m = NamedPathItem{} }
+func (m *NamedPathItem) String() string            { return proto.CompactTextString(m) }
+func (*NamedPathItem) ProtoMessage()               {}
+func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
+
+func (m *NamedPathItem) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedPathItem) GetValue() *PathItem {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
+type NamedResponse struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedResponse) Reset()                    { *m = NamedResponse{} }
+func (m *NamedResponse) String() string            { return proto.CompactTextString(m) }
+func (*NamedResponse) ProtoMessage()               {}
+func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
+
+func (m *NamedResponse) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedResponse) GetValue() *Response {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
+type NamedResponseValue struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedResponseValue) Reset()                    { *m = NamedResponseValue{} }
+func (m *NamedResponseValue) String() string            { return proto.CompactTextString(m) }
+func (*NamedResponseValue) ProtoMessage()               {}
+func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
+
+func (m *NamedResponseValue) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedResponseValue) GetValue() *ResponseValue {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
+type NamedSchema struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedSchema) Reset()                    { *m = NamedSchema{} }
+func (m *NamedSchema) String() string            { return proto.CompactTextString(m) }
+func (*NamedSchema) ProtoMessage()               {}
+func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
+
+func (m *NamedSchema) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedSchema) GetValue() *Schema {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
+type NamedSecurityDefinitionsItem struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedSecurityDefinitionsItem) Reset()                    { *m = NamedSecurityDefinitionsItem{} }
+func (m *NamedSecurityDefinitionsItem) String() string            { return proto.CompactTextString(m) }
+func (*NamedSecurityDefinitionsItem) ProtoMessage()               {}
+func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
+
+func (m *NamedSecurityDefinitionsItem) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
+type NamedString struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedString) Reset()                    { *m = NamedString{} }
+func (m *NamedString) String() string            { return proto.CompactTextString(m) }
+func (*NamedString) ProtoMessage()               {}
+func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
+
+func (m *NamedString) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedString) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
+type NamedStringArray struct {
+	// Map key
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Mapped value
+	Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+}
+
+func (m *NamedStringArray) Reset()                    { *m = NamedStringArray{} }
+func (m *NamedStringArray) String() string            { return proto.CompactTextString(m) }
+func (*NamedStringArray) ProtoMessage()               {}
+func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
+
+func (m *NamedStringArray) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *NamedStringArray) GetValue() *StringArray {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type NonBodyParameter struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*NonBodyParameter_HeaderParameterSubSchema
+	//	*NonBodyParameter_FormDataParameterSubSchema
+	//	*NonBodyParameter_QueryParameterSubSchema
+	//	*NonBodyParameter_PathParameterSubSchema
+	Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *NonBodyParameter) Reset()                    { *m = NonBodyParameter{} }
+func (m *NonBodyParameter) String() string            { return proto.CompactTextString(m) }
+func (*NonBodyParameter) ProtoMessage()               {}
+func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
+
+type isNonBodyParameter_Oneof interface {
+	isNonBodyParameter_Oneof()
+}
+
+type NonBodyParameter_HeaderParameterSubSchema struct {
+	HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"`
+}
+type NonBodyParameter_FormDataParameterSubSchema struct {
+	FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"`
+}
+type NonBodyParameter_QueryParameterSubSchema struct {
+	QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"`
+}
+type NonBodyParameter_PathParameterSubSchema struct {
+	PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"`
+}
+
+func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof()   {}
+func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {}
+func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof()    {}
+func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof()     {}
+
+func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema {
+	if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok {
+		return x.HeaderParameterSubSchema
+	}
+	return nil
+}
+
+func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema {
+	if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok {
+		return x.FormDataParameterSubSchema
+	}
+	return nil
+}
+
+func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema {
+	if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok {
+		return x.QueryParameterSubSchema
+	}
+	return nil
+}
+
+func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema {
+	if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok {
+		return x.PathParameterSubSchema
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{
+		(*NonBodyParameter_HeaderParameterSubSchema)(nil),
+		(*NonBodyParameter_FormDataParameterSubSchema)(nil),
+		(*NonBodyParameter_QueryParameterSubSchema)(nil),
+		(*NonBodyParameter_PathParameterSubSchema)(nil),
+	}
+}
+
+func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*NonBodyParameter)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *NonBodyParameter_HeaderParameterSubSchema:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil {
+			return err
+		}
+	case *NonBodyParameter_FormDataParameterSubSchema:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil {
+			return err
+		}
+	case *NonBodyParameter_QueryParameterSubSchema:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil {
+			return err
+		}
+	case *NonBodyParameter_PathParameterSubSchema:
+		b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*NonBodyParameter)
+	switch tag {
+	case 1: // oneof.header_parameter_sub_schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(HeaderParameterSubSchema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg}
+		return true, err
+	case 2: // oneof.form_data_parameter_sub_schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(FormDataParameterSubSchema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg}
+		return true, err
+	case 3: // oneof.query_parameter_sub_schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(QueryParameterSubSchema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg}
+		return true, err
+	case 4: // oneof.path_parameter_sub_schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(PathParameterSubSchema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*NonBodyParameter)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *NonBodyParameter_HeaderParameterSubSchema:
+		s := proto.Size(x.HeaderParameterSubSchema)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *NonBodyParameter_FormDataParameterSubSchema:
+		s := proto.Size(x.FormDataParameterSubSchema)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *NonBodyParameter_QueryParameterSubSchema:
+		s := proto.Size(x.QueryParameterSubSchema)
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *NonBodyParameter_PathParameterSubSchema:
+		s := proto.Size(x.PathParameterSubSchema)
+		n += proto.SizeVarint(4<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
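+// A NonBodyParameter holds exactly one of the four sub-schemas above. The
+// function below is an illustrative sketch rather than generated code:
+// describeNonBodyParameter is a hypothetical helper showing how the oneof
+// wrapper types are typically inspected with a type switch.
+func describeNonBodyParameter(p *NonBodyParameter) string {
+	switch p.GetOneof().(type) {
+	case *NonBodyParameter_HeaderParameterSubSchema:
+		return "header parameter"
+	case *NonBodyParameter_FormDataParameterSubSchema:
+		return "formData parameter"
+	case *NonBodyParameter_QueryParameterSubSchema:
+		return "query parameter"
+	case *NonBodyParameter_PathParameterSubSchema:
+		return "path parameter"
+	default:
+		// Covers both a nil message and an unset oneof.
+		return "unset"
+	}
+}
+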
+type Oauth2AccessCodeSecurity struct {
+	Type             string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Flow             string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
+	Scopes           *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
+	AuthorizationUrl string        `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
+	TokenUrl         string        `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
+	Description      string        `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"`
+	VendorExtension  []*NamedAny   `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Oauth2AccessCodeSecurity) Reset()                    { *m = Oauth2AccessCodeSecurity{} }
+func (m *Oauth2AccessCodeSecurity) String() string            { return proto.CompactTextString(m) }
+func (*Oauth2AccessCodeSecurity) ProtoMessage()               {}
+func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
+
+func (m *Oauth2AccessCodeSecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Oauth2AccessCodeSecurity) GetFlow() string {
+	if m != nil {
+		return m.Flow
+	}
+	return ""
+}
+
+func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string {
+	if m != nil {
+		return m.AuthorizationUrl
+	}
+	return ""
+}
+
+func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string {
+	if m != nil {
+		return m.TokenUrl
+	}
+	return ""
+}
+
+func (m *Oauth2AccessCodeSecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2ApplicationSecurity struct {
+	Type            string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Flow            string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
+	Scopes          *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
+	TokenUrl        string        `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
+	Description     string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
+	VendorExtension []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Oauth2ApplicationSecurity) Reset()                    { *m = Oauth2ApplicationSecurity{} }
+func (m *Oauth2ApplicationSecurity) String() string            { return proto.CompactTextString(m) }
+func (*Oauth2ApplicationSecurity) ProtoMessage()               {}
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
+
+func (m *Oauth2ApplicationSecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Oauth2ApplicationSecurity) GetFlow() string {
+	if m != nil {
+		return m.Flow
+	}
+	return ""
+}
+
+func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+func (m *Oauth2ApplicationSecurity) GetTokenUrl() string {
+	if m != nil {
+		return m.TokenUrl
+	}
+	return ""
+}
+
+func (m *Oauth2ApplicationSecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2ImplicitSecurity struct {
+	Type             string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Flow             string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
+	Scopes           *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
+	AuthorizationUrl string        `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"`
+	Description      string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
+	VendorExtension  []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Oauth2ImplicitSecurity) Reset()                    { *m = Oauth2ImplicitSecurity{} }
+func (m *Oauth2ImplicitSecurity) String() string            { return proto.CompactTextString(m) }
+func (*Oauth2ImplicitSecurity) ProtoMessage()               {}
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
+
+func (m *Oauth2ImplicitSecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Oauth2ImplicitSecurity) GetFlow() string {
+	if m != nil {
+		return m.Flow
+	}
+	return ""
+}
+
+func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string {
+	if m != nil {
+		return m.AuthorizationUrl
+	}
+	return ""
+}
+
+func (m *Oauth2ImplicitSecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2PasswordSecurity struct {
+	Type            string        `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Flow            string        `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"`
+	Scopes          *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"`
+	TokenUrl        string        `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"`
+	Description     string        `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"`
+	VendorExtension []*NamedAny   `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Oauth2PasswordSecurity) Reset()                    { *m = Oauth2PasswordSecurity{} }
+func (m *Oauth2PasswordSecurity) String() string            { return proto.CompactTextString(m) }
+func (*Oauth2PasswordSecurity) ProtoMessage()               {}
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
+
+func (m *Oauth2PasswordSecurity) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Oauth2PasswordSecurity) GetFlow() string {
+	if m != nil {
+		return m.Flow
+	}
+	return ""
+}
+
+func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes {
+	if m != nil {
+		return m.Scopes
+	}
+	return nil
+}
+
+func (m *Oauth2PasswordSecurity) GetTokenUrl() string {
+	if m != nil {
+		return m.TokenUrl
+	}
+	return ""
+}
+
+func (m *Oauth2PasswordSecurity) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Oauth2Scopes struct {
+	AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Oauth2Scopes) Reset()                    { *m = Oauth2Scopes{} }
+func (m *Oauth2Scopes) String() string            { return proto.CompactTextString(m) }
+func (*Oauth2Scopes) ProtoMessage()               {}
+func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
+
+func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type Operation struct {
+	Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"`
+	// A brief summary of the operation.
+	Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+	// A longer description of the operation. GitHub Flavored Markdown is allowed.
+	Description  string        `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
+	// A unique identifier of the operation.
+	OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"`
+	// A list of MIME types the API can produce.
+	Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"`
+	// A list of MIME types the API can consume.
+	Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"`
+	// The parameters needed to send a valid API call.
+	Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"`
+	Responses  *Responses        `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"`
+	// The transfer protocol of the API.
+	Schemes         []string               `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"`
+	Deprecated      bool                   `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"`
+	Security        []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"`
+	VendorExtension []*NamedAny            `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Operation) Reset()                    { *m = Operation{} }
+func (m *Operation) String() string            { return proto.CompactTextString(m) }
+func (*Operation) ProtoMessage()               {}
+func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
+
+func (m *Operation) GetTags() []string {
+	if m != nil {
+		return m.Tags
+	}
+	return nil
+}
+
+func (m *Operation) GetSummary() string {
+	if m != nil {
+		return m.Summary
+	}
+	return ""
+}
+
+func (m *Operation) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Operation) GetExternalDocs() *ExternalDocs {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Operation) GetOperationId() string {
+	if m != nil {
+		return m.OperationId
+	}
+	return ""
+}
+
+func (m *Operation) GetProduces() []string {
+	if m != nil {
+		return m.Produces
+	}
+	return nil
+}
+
+func (m *Operation) GetConsumes() []string {
+	if m != nil {
+		return m.Consumes
+	}
+	return nil
+}
+
+func (m *Operation) GetParameters() []*ParametersItem {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *Operation) GetResponses() *Responses {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+func (m *Operation) GetSchemes() []string {
+	if m != nil {
+		return m.Schemes
+	}
+	return nil
+}
+
+func (m *Operation) GetDeprecated() bool {
+	if m != nil {
+		return m.Deprecated
+	}
+	return false
+}
+
+func (m *Operation) GetSecurity() []*SecurityRequirement {
+	if m != nil {
+		return m.Security
+	}
+	return nil
+}
+
+func (m *Operation) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type Parameter struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*Parameter_BodyParameter
+	//	*Parameter_NonBodyParameter
+	Oneof isParameter_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *Parameter) Reset()                    { *m = Parameter{} }
+func (m *Parameter) String() string            { return proto.CompactTextString(m) }
+func (*Parameter) ProtoMessage()               {}
+func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
+
+type isParameter_Oneof interface {
+	isParameter_Oneof()
+}
+
+type Parameter_BodyParameter struct {
+	BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"`
+}
+type Parameter_NonBodyParameter struct {
+	NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"`
+}
+
+func (*Parameter_BodyParameter) isParameter_Oneof()    {}
+func (*Parameter_NonBodyParameter) isParameter_Oneof() {}
+
+func (m *Parameter) GetOneof() isParameter_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *Parameter) GetBodyParameter() *BodyParameter {
+	if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok {
+		return x.BodyParameter
+	}
+	return nil
+}
+
+func (m *Parameter) GetNonBodyParameter() *NonBodyParameter {
+	if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok {
+		return x.NonBodyParameter
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{
+		(*Parameter_BodyParameter)(nil),
+		(*Parameter_NonBodyParameter)(nil),
+	}
+}
+
+func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Parameter)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *Parameter_BodyParameter:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.BodyParameter); err != nil {
+			return err
+		}
+	case *Parameter_NonBodyParameter:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.NonBodyParameter); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("Parameter.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Parameter)
+	switch tag {
+	case 1: // oneof.body_parameter
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(BodyParameter)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &Parameter_BodyParameter{msg}
+		return true, err
+	case 2: // oneof.non_body_parameter
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(NonBodyParameter)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &Parameter_NonBodyParameter{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Parameter_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Parameter)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *Parameter_BodyParameter:
+		s := proto.Size(x.BodyParameter)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *Parameter_NonBodyParameter:
+		s := proto.Size(x.NonBodyParameter)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// One or more JSON representations for parameters
+type ParameterDefinitions struct {
+	AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *ParameterDefinitions) Reset()                    { *m = ParameterDefinitions{} }
+func (m *ParameterDefinitions) String() string            { return proto.CompactTextString(m) }
+func (*ParameterDefinitions) ProtoMessage()               {}
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
+
+func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type ParametersItem struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*ParametersItem_Parameter
+	//	*ParametersItem_JsonReference
+	Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *ParametersItem) Reset()                    { *m = ParametersItem{} }
+func (m *ParametersItem) String() string            { return proto.CompactTextString(m) }
+func (*ParametersItem) ProtoMessage()               {}
+func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
+
+type isParametersItem_Oneof interface {
+	isParametersItem_Oneof()
+}
+
+type ParametersItem_Parameter struct {
+	Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"`
+}
+type ParametersItem_JsonReference struct {
+	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"`
+}
+
+func (*ParametersItem_Parameter) isParametersItem_Oneof()     {}
+func (*ParametersItem_JsonReference) isParametersItem_Oneof() {}
+
+func (m *ParametersItem) GetOneof() isParametersItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *ParametersItem) GetParameter() *Parameter {
+	if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok {
+		return x.Parameter
+	}
+	return nil
+}
+
+func (m *ParametersItem) GetJsonReference() *JsonReference {
+	if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok {
+		return x.JsonReference
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{
+		(*ParametersItem_Parameter)(nil),
+		(*ParametersItem_JsonReference)(nil),
+	}
+}
+
+func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*ParametersItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *ParametersItem_Parameter:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Parameter); err != nil {
+			return err
+		}
+	case *ParametersItem_JsonReference:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.JsonReference); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*ParametersItem)
+	switch tag {
+	case 1: // oneof.parameter
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Parameter)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &ParametersItem_Parameter{msg}
+		return true, err
+	case 2: // oneof.json_reference
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(JsonReference)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &ParametersItem_JsonReference{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _ParametersItem_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*ParametersItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *ParametersItem_Parameter:
+		s := proto.Size(x.Parameter)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ParametersItem_JsonReference:
+		s := proto.Size(x.JsonReference)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
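+// A ParametersItem is either an inline Parameter or a JSON reference. The
+// helper below is an illustrative sketch rather than generated code:
+// parameterOrRef is a hypothetical name for unwrapping the oneof.
+func parameterOrRef(item *ParametersItem) (*Parameter, string) {
+	if p := item.GetParameter(); p != nil {
+		return p, ""
+	}
+	if ref := item.GetJsonReference(); ref != nil {
+		return nil, ref.GetXRef()
+	}
+	return nil, ""
+}
+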
+type PathItem struct {
+	XRef    string     `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
+	Get     *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"`
+	Put     *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"`
+	Post    *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"`
+	Delete  *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"`
+	Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"`
+	Head    *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"`
+	Patch   *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"`
+	// The parameters needed to send a valid API call.
+	Parameters      []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"`
+	VendorExtension []*NamedAny       `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *PathItem) Reset()                    { *m = PathItem{} }
+func (m *PathItem) String() string            { return proto.CompactTextString(m) }
+func (*PathItem) ProtoMessage()               {}
+func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
+
+func (m *PathItem) GetXRef() string {
+	if m != nil {
+		return m.XRef
+	}
+	return ""
+}
+
+func (m *PathItem) GetGet() *Operation {
+	if m != nil {
+		return m.Get
+	}
+	return nil
+}
+
+func (m *PathItem) GetPut() *Operation {
+	if m != nil {
+		return m.Put
+	}
+	return nil
+}
+
+func (m *PathItem) GetPost() *Operation {
+	if m != nil {
+		return m.Post
+	}
+	return nil
+}
+
+func (m *PathItem) GetDelete() *Operation {
+	if m != nil {
+		return m.Delete
+	}
+	return nil
+}
+
+func (m *PathItem) GetOptions() *Operation {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *PathItem) GetHead() *Operation {
+	if m != nil {
+		return m.Head
+	}
+	return nil
+}
+
+func (m *PathItem) GetPatch() *Operation {
+	if m != nil {
+		return m.Patch
+	}
+	return nil
+}
+
+func (m *PathItem) GetParameters() []*ParametersItem {
+	if m != nil {
+		return m.Parameters
+	}
+	return nil
+}
+
+func (m *PathItem) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type PathParameterSubSchema struct {
+	// Determines whether this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The name of the parameter.
+	Name             string           `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	Type             string           `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *PathParameterSubSchema) Reset()                    { *m = PathParameterSubSchema{} }
+func (m *PathParameterSubSchema) String() string            { return proto.CompactTextString(m) }
+func (*PathParameterSubSchema) ProtoMessage()               {}
+func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
+
+func (m *PathParameterSubSchema) GetRequired() bool {
+	if m != nil {
+		return m.Required
+	}
+	return false
+}
+
+func (m *PathParameterSubSchema) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *PathParameterSubSchema) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *PathParameterSubSchema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *PathParameterSubSchema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *PathParameterSubSchema) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *PathParameterSubSchema) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *PathParameterSubSchema) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *PathParameterSubSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
+type Paths struct {
+	VendorExtension []*NamedAny      `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+	Path            []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"`
+}
+
+func (m *Paths) Reset()                    { *m = Paths{} }
+func (m *Paths) String() string            { return proto.CompactTextString(m) }
+func (*Paths) ProtoMessage()               {}
+func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
+
+func (m *Paths) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+func (m *Paths) GetPath() []*NamedPathItem {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
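+// Paths stores each endpoint as a NamedPathItem, and every PathItem carries
+// at most one Operation per HTTP method. The walker below is an illustrative
+// sketch rather than generated code: listOperations is a hypothetical name,
+// and keying the result by "path method" is an assumption for the example.
+func listOperations(paths *Paths) map[string]string {
+	ops := make(map[string]string)
+	for _, named := range paths.GetPath() {
+		item := named.GetValue()
+		// Every method getter is nil-safe, so absent methods simply yield nil.
+		byMethod := map[string]*Operation{
+			"get":     item.GetGet(),
+			"put":     item.GetPut(),
+			"post":    item.GetPost(),
+			"delete":  item.GetDelete(),
+			"options": item.GetOptions(),
+			"head":    item.GetHead(),
+			"patch":   item.GetPatch(),
+		}
+		for method, op := range byMethod {
+			if op != nil {
+				ops[named.GetName()+" "+method] = op.GetOperationId()
+			}
+		}
+	}
+	return ops
+}
+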
+type PrimitivesItems struct {
+	Type             string           `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *PrimitivesItems) Reset()                    { *m = PrimitivesItems{} }
+func (m *PrimitivesItems) String() string            { return proto.CompactTextString(m) }
+func (*PrimitivesItems) ProtoMessage()               {}
+func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
+
+func (m *PrimitivesItems) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *PrimitivesItems) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *PrimitivesItems) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *PrimitivesItems) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *PrimitivesItems) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *PrimitivesItems) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *PrimitivesItems) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *PrimitivesItems) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *PrimitivesItems) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *PrimitivesItems) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *PrimitivesItems) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *PrimitivesItems) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
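+// exampleStringArrayItems is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto) of a PrimitivesItems describing a
+// comma-separated array of strings; non-body parameters point at such a
+// value through their Items field.
+func exampleStringArrayItems() *PrimitivesItems {
+	return &PrimitivesItems{
+		Type:             "array",
+		CollectionFormat: "csv",
+		// Items describes the element type and may nest for arrays of arrays.
+		Items: &PrimitivesItems{Type: "string"},
+	}
+}
+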
+type Properties struct {
+	AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *Properties) Reset()                    { *m = Properties{} }
+func (m *Properties) String() string            { return proto.CompactTextString(m) }
+func (*Properties) ProtoMessage()               {}
+func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
+
+func (m *Properties) GetAdditionalProperties() []*NamedSchema {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type QueryParameterSubSchema struct {
+	// Determines whether this parameter is required or optional.
+	Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"`
+	// Determines the location of the parameter.
+	In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"`
+	// A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+	Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"`
+	// The name of the parameter.
+	Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+	// Allows sending a parameter by name only or with an empty value.
+	AllowEmptyValue  bool             `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"`
+	Type             string           `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"`
+	Format           string           `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"`
+	Items            *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"`
+	CollectionFormat string           `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"`
+	Default          *Any             `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"`
+	Maximum          float64          `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum bool             `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum          float64          `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum bool             `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength        int64            `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength        int64            `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern          string           `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems         int64            `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems         int64            `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems      bool             `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	Enum             []*Any           `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"`
+	MultipleOf       float64          `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	VendorExtension  []*NamedAny      `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *QueryParameterSubSchema) Reset()                    { *m = QueryParameterSubSchema{} }
+func (m *QueryParameterSubSchema) String() string            { return proto.CompactTextString(m) }
+func (*QueryParameterSubSchema) ProtoMessage()               {}
+func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
+
+func (m *QueryParameterSubSchema) GetRequired() bool {
+	if m != nil {
+		return m.Required
+	}
+	return false
+}
+
+func (m *QueryParameterSubSchema) GetIn() string {
+	if m != nil {
+		return m.In
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool {
+	if m != nil {
+		return m.AllowEmptyValue
+	}
+	return false
+}
+
+func (m *QueryParameterSubSchema) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *QueryParameterSubSchema) GetCollectionFormat() string {
+	if m != nil {
+		return m.CollectionFormat
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *QueryParameterSubSchema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *QueryParameterSubSchema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *QueryParameterSubSchema) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *QueryParameterSubSchema) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *QueryParameterSubSchema) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *QueryParameterSubSchema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
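+// exampleTagsQueryParameter is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto) of a QueryParameterSubSchema for an
+// optional "tags" query parameter carrying a comma-separated list of
+// strings, reusing the PrimitivesItems sketch above.
+func exampleTagsQueryParameter() *QueryParameterSubSchema {
+	return &QueryParameterSubSchema{
+		Name:             "tags",
+		In:               "query",
+		Type:             "array",
+		CollectionFormat: "csv",
+		Items:            &PrimitivesItems{Type: "string"},
+	}
+}
+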
+type Response struct {
+	Description     string      `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"`
+	Schema          *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"`
+	Headers         *Headers    `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"`
+	Examples        *Examples   `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Response) Reset()                    { *m = Response{} }
+func (m *Response) String() string            { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage()               {}
+func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} }
+
+func (m *Response) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Response) GetSchema() *SchemaItem {
+	if m != nil {
+		return m.Schema
+	}
+	return nil
+}
+
+func (m *Response) GetHeaders() *Headers {
+	if m != nil {
+		return m.Headers
+	}
+	return nil
+}
+
+func (m *Response) GetExamples() *Examples {
+	if m != nil {
+		return m.Examples
+	}
+	return nil
+}
+
+func (m *Response) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+// One or more JSON representations for responses
+type ResponseDefinitions struct {
+	AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *ResponseDefinitions) Reset()                    { *m = ResponseDefinitions{} }
+func (m *ResponseDefinitions) String() string            { return proto.CompactTextString(m) }
+func (*ResponseDefinitions) ProtoMessage()               {}
+func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} }
+
+func (m *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type ResponseValue struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*ResponseValue_Response
+	//	*ResponseValue_JsonReference
+	Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *ResponseValue) Reset()                    { *m = ResponseValue{} }
+func (m *ResponseValue) String() string            { return proto.CompactTextString(m) }
+func (*ResponseValue) ProtoMessage()               {}
+func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} }
+
+type isResponseValue_Oneof interface {
+	isResponseValue_Oneof()
+}
+
+type ResponseValue_Response struct {
+	Response *Response `protobuf:"bytes,1,opt,name=response,oneof"`
+}
+type ResponseValue_JsonReference struct {
+	JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"`
+}
+
+func (*ResponseValue_Response) isResponseValue_Oneof()      {}
+func (*ResponseValue_JsonReference) isResponseValue_Oneof() {}
+
+func (m *ResponseValue) GetOneof() isResponseValue_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *ResponseValue) GetResponse() *Response {
+	if x, ok := m.GetOneof().(*ResponseValue_Response); ok {
+		return x.Response
+	}
+	return nil
+}
+
+func (m *ResponseValue) GetJsonReference() *JsonReference {
+	if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok {
+		return x.JsonReference
+	}
+	return nil
+}
+
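+// exampleDescribeResponseValue is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto) of the usual way to consume the oneof
+// above: type-switch on GetOneof, or use the nil-safe GetResponse and
+// GetJsonReference accessors. It assumes the JsonReference accessor GetXRef
+// generated earlier in this file.
+func exampleDescribeResponseValue(rv *ResponseValue) string {
+	switch v := rv.GetOneof().(type) {
+	case *ResponseValue_Response:
+		return "inline response: " + v.Response.GetDescription()
+	case *ResponseValue_JsonReference:
+		return "reference: " + v.JsonReference.GetXRef()
+	default:
+		return "unset"
+	}
+}
+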
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{
+		(*ResponseValue_Response)(nil),
+		(*ResponseValue_JsonReference)(nil),
+	}
+}
+
+func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*ResponseValue)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *ResponseValue_Response:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Response); err != nil {
+			return err
+		}
+	case *ResponseValue_JsonReference:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.JsonReference); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*ResponseValue)
+	switch tag {
+	case 1: // oneof.response
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Response)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &ResponseValue_Response{msg}
+		return true, err
+	case 2: // oneof.json_reference
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(JsonReference)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &ResponseValue_JsonReference{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _ResponseValue_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*ResponseValue)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *ResponseValue_Response:
+		s := proto.Size(x.Response)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseValue_JsonReference:
+		s := proto.Size(x.JsonReference)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// Response object names can either be any valid HTTP status code or 'default'.
+type Responses struct {
+	ResponseCode    []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"`
+	VendorExtension []*NamedAny           `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Responses) Reset()                    { *m = Responses{} }
+func (m *Responses) String() string            { return proto.CompactTextString(m) }
+func (*Responses) ProtoMessage()               {}
+func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} }
+
+func (m *Responses) GetResponseCode() []*NamedResponseValue {
+	if m != nil {
+		return m.ResponseCode
+	}
+	return nil
+}
+
+func (m *Responses) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
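+// exampleLookupResponse is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto) that finds the ResponseValue registered for
+// a status code (or "default") in the Responses message above. It assumes the
+// NamedResponseValue accessors GetName/GetValue generated earlier in this file.
+func exampleLookupResponse(r *Responses, code string) *ResponseValue {
+	for _, named := range r.GetResponseCode() {
+		if named.GetName() == code {
+			return named.GetValue()
+		}
+	}
+	return nil
+}
+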
+// A deterministic version of a JSON Schema object.
+type Schema struct {
+	XRef                 string                    `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"`
+	Format               string                    `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"`
+	Title                string                    `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"`
+	Description          string                    `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"`
+	Default              *Any                      `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"`
+	MultipleOf           float64                   `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"`
+	Maximum              float64                   `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"`
+	ExclusiveMaximum     bool                      `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"`
+	Minimum              float64                   `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"`
+	ExclusiveMinimum     bool                      `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"`
+	MaxLength            int64                     `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"`
+	MinLength            int64                     `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"`
+	Pattern              string                    `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"`
+	MaxItems             int64                     `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"`
+	MinItems             int64                     `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"`
+	UniqueItems          bool                      `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"`
+	MaxProperties        int64                     `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"`
+	MinProperties        int64                     `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"`
+	Required             []string                  `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"`
+	Enum                 []*Any                    `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"`
+	AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+	Type                 *TypeItem                 `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"`
+	Items                *ItemsItem                `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"`
+	AllOf                []*Schema                 `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"`
+	Properties           *Properties               `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"`
+	Discriminator        string                    `protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"`
+	ReadOnly             bool                      `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"`
+	Xml                  *Xml                      `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"`
+	ExternalDocs         *ExternalDocs             `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
+	Example              *Any                      `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"`
+	VendorExtension      []*NamedAny               `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Schema) Reset()                    { *m = Schema{} }
+func (m *Schema) String() string            { return proto.CompactTextString(m) }
+func (*Schema) ProtoMessage()               {}
+func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} }
+
+func (m *Schema) GetXRef() string {
+	if m != nil {
+		return m.XRef
+	}
+	return ""
+}
+
+func (m *Schema) GetFormat() string {
+	if m != nil {
+		return m.Format
+	}
+	return ""
+}
+
+func (m *Schema) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *Schema) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Schema) GetDefault() *Any {
+	if m != nil {
+		return m.Default
+	}
+	return nil
+}
+
+func (m *Schema) GetMultipleOf() float64 {
+	if m != nil {
+		return m.MultipleOf
+	}
+	return 0
+}
+
+func (m *Schema) GetMaximum() float64 {
+	if m != nil {
+		return m.Maximum
+	}
+	return 0
+}
+
+func (m *Schema) GetExclusiveMaximum() bool {
+	if m != nil {
+		return m.ExclusiveMaximum
+	}
+	return false
+}
+
+func (m *Schema) GetMinimum() float64 {
+	if m != nil {
+		return m.Minimum
+	}
+	return 0
+}
+
+func (m *Schema) GetExclusiveMinimum() bool {
+	if m != nil {
+		return m.ExclusiveMinimum
+	}
+	return false
+}
+
+func (m *Schema) GetMaxLength() int64 {
+	if m != nil {
+		return m.MaxLength
+	}
+	return 0
+}
+
+func (m *Schema) GetMinLength() int64 {
+	if m != nil {
+		return m.MinLength
+	}
+	return 0
+}
+
+func (m *Schema) GetPattern() string {
+	if m != nil {
+		return m.Pattern
+	}
+	return ""
+}
+
+func (m *Schema) GetMaxItems() int64 {
+	if m != nil {
+		return m.MaxItems
+	}
+	return 0
+}
+
+func (m *Schema) GetMinItems() int64 {
+	if m != nil {
+		return m.MinItems
+	}
+	return 0
+}
+
+func (m *Schema) GetUniqueItems() bool {
+	if m != nil {
+		return m.UniqueItems
+	}
+	return false
+}
+
+func (m *Schema) GetMaxProperties() int64 {
+	if m != nil {
+		return m.MaxProperties
+	}
+	return 0
+}
+
+func (m *Schema) GetMinProperties() int64 {
+	if m != nil {
+		return m.MinProperties
+	}
+	return 0
+}
+
+func (m *Schema) GetRequired() []string {
+	if m != nil {
+		return m.Required
+	}
+	return nil
+}
+
+func (m *Schema) GetEnum() []*Any {
+	if m != nil {
+		return m.Enum
+	}
+	return nil
+}
+
+func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+func (m *Schema) GetType() *TypeItem {
+	if m != nil {
+		return m.Type
+	}
+	return nil
+}
+
+func (m *Schema) GetItems() *ItemsItem {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func (m *Schema) GetAllOf() []*Schema {
+	if m != nil {
+		return m.AllOf
+	}
+	return nil
+}
+
+func (m *Schema) GetProperties() *Properties {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+func (m *Schema) GetDiscriminator() string {
+	if m != nil {
+		return m.Discriminator
+	}
+	return ""
+}
+
+func (m *Schema) GetReadOnly() bool {
+	if m != nil {
+		return m.ReadOnly
+	}
+	return false
+}
+
+func (m *Schema) GetXml() *Xml {
+	if m != nil {
+		return m.Xml
+	}
+	return nil
+}
+
+func (m *Schema) GetExternalDocs() *ExternalDocs {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Schema) GetExample() *Any {
+	if m != nil {
+		return m.Example
+	}
+	return nil
+}
+
+func (m *Schema) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
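+// exampleObjectSchema is an illustrative sketch (hand-written, not generated
+// from OpenAPIv2.proto) of a minimal object Schema with one required string
+// property, wired together from the messages in this file. It assumes the
+// NamedSchema fields Name/Value generated earlier in this file.
+func exampleObjectSchema() *Schema {
+	return &Schema{
+		Type:     &TypeItem{Value: []string{"object"}},
+		Required: []string{"name"},
+		Properties: &Properties{
+			AdditionalProperties: []*NamedSchema{
+				{Name: "name", Value: &Schema{Type: &TypeItem{Value: []string{"string"}}}},
+			},
+		},
+	}
+}
+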
+type SchemaItem struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*SchemaItem_Schema
+	//	*SchemaItem_FileSchema
+	Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *SchemaItem) Reset()                    { *m = SchemaItem{} }
+func (m *SchemaItem) String() string            { return proto.CompactTextString(m) }
+func (*SchemaItem) ProtoMessage()               {}
+func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} }
+
+type isSchemaItem_Oneof interface {
+	isSchemaItem_Oneof()
+}
+
+type SchemaItem_Schema struct {
+	Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"`
+}
+type SchemaItem_FileSchema struct {
+	FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"`
+}
+
+func (*SchemaItem_Schema) isSchemaItem_Oneof()     {}
+func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {}
+
+func (m *SchemaItem) GetOneof() isSchemaItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *SchemaItem) GetSchema() *Schema {
+	if x, ok := m.GetOneof().(*SchemaItem_Schema); ok {
+		return x.Schema
+	}
+	return nil
+}
+
+func (m *SchemaItem) GetFileSchema() *FileSchema {
+	if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok {
+		return x.FileSchema
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{
+		(*SchemaItem_Schema)(nil),
+		(*SchemaItem_FileSchema)(nil),
+	}
+}
+
+func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*SchemaItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *SchemaItem_Schema:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Schema); err != nil {
+			return err
+		}
+	case *SchemaItem_FileSchema:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.FileSchema); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*SchemaItem)
+	switch tag {
+	case 1: // oneof.schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Schema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SchemaItem_Schema{msg}
+		return true, err
+	case 2: // oneof.file_schema
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(FileSchema)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SchemaItem_FileSchema{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _SchemaItem_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*SchemaItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *SchemaItem_Schema:
+		s := proto.Size(x.Schema)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SchemaItem_FileSchema:
+		s := proto.Size(x.FileSchema)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type SecurityDefinitions struct {
+	AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *SecurityDefinitions) Reset()                    { *m = SecurityDefinitions{} }
+func (m *SecurityDefinitions) String() string            { return proto.CompactTextString(m) }
+func (*SecurityDefinitions) ProtoMessage()               {}
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} }
+
+func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type SecurityDefinitionsItem struct {
+	// Types that are valid to be assigned to Oneof:
+	//	*SecurityDefinitionsItem_BasicAuthenticationSecurity
+	//	*SecurityDefinitionsItem_ApiKeySecurity
+	//	*SecurityDefinitionsItem_Oauth2ImplicitSecurity
+	//	*SecurityDefinitionsItem_Oauth2PasswordSecurity
+	//	*SecurityDefinitionsItem_Oauth2ApplicationSecurity
+	//	*SecurityDefinitionsItem_Oauth2AccessCodeSecurity
+	Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"`
+}
+
+func (m *SecurityDefinitionsItem) Reset()                    { *m = SecurityDefinitionsItem{} }
+func (m *SecurityDefinitionsItem) String() string            { return proto.CompactTextString(m) }
+func (*SecurityDefinitionsItem) ProtoMessage()               {}
+func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} }
+
+type isSecurityDefinitionsItem_Oneof interface {
+	isSecurityDefinitionsItem_Oneof()
+}
+
+type SecurityDefinitionsItem_BasicAuthenticationSecurity struct {
+	BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"`
+}
+type SecurityDefinitionsItem_ApiKeySecurity struct {
+	ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"`
+}
+type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct {
+	Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"`
+}
+type SecurityDefinitionsItem_Oauth2PasswordSecurity struct {
+	Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"`
+}
+type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct {
+	Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"`
+}
+type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct {
+	Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"`
+}
+
+func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {}
+func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof()              {}
+func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof()      {}
+func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof()      {}
+func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof()   {}
+func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof()    {}
+
+func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof {
+	if m != nil {
+		return m.Oneof
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok {
+		return x.BasicAuthenticationSecurity
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok {
+		return x.ApiKeySecurity
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok {
+		return x.Oauth2ImplicitSecurity
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok {
+		return x.Oauth2PasswordSecurity
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok {
+		return x.Oauth2ApplicationSecurity
+	}
+	return nil
+}
+
+func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity {
+	if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok {
+		return x.Oauth2AccessCodeSecurity
+	}
+	return nil
+}
+
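+// exampleSecuritySchemeKind is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto) that maps each branch of the oneof above to
+// the swagger security scheme type it represents.
+func exampleSecuritySchemeKind(item *SecurityDefinitionsItem) string {
+	switch item.GetOneof().(type) {
+	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
+		return "basic"
+	case *SecurityDefinitionsItem_ApiKeySecurity:
+		return "apiKey"
+	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity,
+		*SecurityDefinitionsItem_Oauth2PasswordSecurity,
+		*SecurityDefinitionsItem_Oauth2ApplicationSecurity,
+		*SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
+		return "oauth2"
+	default:
+		return "unknown"
+	}
+}
+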
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{
+		(*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil),
+		(*SecurityDefinitionsItem_ApiKeySecurity)(nil),
+		(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil),
+		(*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil),
+		(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil),
+		(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil),
+	}
+}
+
+func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*SecurityDefinitionsItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
+		b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil {
+			return err
+		}
+	case *SecurityDefinitionsItem_ApiKeySecurity:
+		b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ApiKeySecurity); err != nil {
+			return err
+		}
+	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity:
+		b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil {
+			return err
+		}
+	case *SecurityDefinitionsItem_Oauth2PasswordSecurity:
+		b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil {
+			return err
+		}
+	case *SecurityDefinitionsItem_Oauth2ApplicationSecurity:
+		b.EncodeVarint(5<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil {
+			return err
+		}
+	case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
+		b.EncodeVarint(6<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*SecurityDefinitionsItem)
+	switch tag {
+	case 1: // oneof.basic_authentication_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(BasicAuthenticationSecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg}
+		return true, err
+	case 2: // oneof.api_key_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(ApiKeySecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg}
+		return true, err
+	case 3: // oneof.oauth2_implicit_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Oauth2ImplicitSecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg}
+		return true, err
+	case 4: // oneof.oauth2_password_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Oauth2PasswordSecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg}
+		return true, err
+	case 5: // oneof.oauth2_application_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Oauth2ApplicationSecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg}
+		return true, err
+	case 6: // oneof.oauth2_access_code_security
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(Oauth2AccessCodeSecurity)
+		err := b.DecodeMessage(msg)
+		m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*SecurityDefinitionsItem)
+	// oneof
+	switch x := m.Oneof.(type) {
+	case *SecurityDefinitionsItem_BasicAuthenticationSecurity:
+		s := proto.Size(x.BasicAuthenticationSecurity)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SecurityDefinitionsItem_ApiKeySecurity:
+		s := proto.Size(x.ApiKeySecurity)
+		n += proto.SizeVarint(2<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SecurityDefinitionsItem_Oauth2ImplicitSecurity:
+		s := proto.Size(x.Oauth2ImplicitSecurity)
+		n += proto.SizeVarint(3<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SecurityDefinitionsItem_Oauth2PasswordSecurity:
+		s := proto.Size(x.Oauth2PasswordSecurity)
+		n += proto.SizeVarint(4<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SecurityDefinitionsItem_Oauth2ApplicationSecurity:
+		s := proto.Size(x.Oauth2ApplicationSecurity)
+		n += proto.SizeVarint(5<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity:
+		s := proto.Size(x.Oauth2AccessCodeSecurity)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type SecurityRequirement struct {
+	AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *SecurityRequirement) Reset()                    { *m = SecurityRequirement{} }
+func (m *SecurityRequirement) String() string            { return proto.CompactTextString(m) }
+func (*SecurityRequirement) ProtoMessage()               {}
+func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} }
+
+func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type StringArray struct {
+	Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
+}
+
+func (m *StringArray) Reset()                    { *m = StringArray{} }
+func (m *StringArray) String() string            { return proto.CompactTextString(m) }
+func (*StringArray) ProtoMessage()               {}
+func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} }
+
+func (m *StringArray) GetValue() []string {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Tag struct {
+	Name            string        `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Description     string        `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"`
+	ExternalDocs    *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"`
+	VendorExtension []*NamedAny   `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Tag) Reset()                    { *m = Tag{} }
+func (m *Tag) String() string            { return proto.CompactTextString(m) }
+func (*Tag) ProtoMessage()               {}
+func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} }
+
+func (m *Tag) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Tag) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *Tag) GetExternalDocs() *ExternalDocs {
+	if m != nil {
+		return m.ExternalDocs
+	}
+	return nil
+}
+
+func (m *Tag) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+type TypeItem struct {
+	Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"`
+}
+
+func (m *TypeItem) Reset()                    { *m = TypeItem{} }
+func (m *TypeItem) String() string            { return proto.CompactTextString(m) }
+func (*TypeItem) ProtoMessage()               {}
+func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} }
+
+func (m *TypeItem) GetValue() []string {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+// Any property starting with x- is valid.
+type VendorExtension struct {
+	AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"`
+}
+
+func (m *VendorExtension) Reset()                    { *m = VendorExtension{} }
+func (m *VendorExtension) String() string            { return proto.CompactTextString(m) }
+func (*VendorExtension) ProtoMessage()               {}
+func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} }
+
+func (m *VendorExtension) GetAdditionalProperties() []*NamedAny {
+	if m != nil {
+		return m.AdditionalProperties
+	}
+	return nil
+}
+
+type Xml struct {
+	Name            string      `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Namespace       string      `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"`
+	Prefix          string      `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"`
+	Attribute       bool        `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"`
+	Wrapped         bool        `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"`
+	VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"`
+}
+
+func (m *Xml) Reset()                    { *m = Xml{} }
+func (m *Xml) String() string            { return proto.CompactTextString(m) }
+func (*Xml) ProtoMessage()               {}
+func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} }
+
+func (m *Xml) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Xml) GetNamespace() string {
+	if m != nil {
+		return m.Namespace
+	}
+	return ""
+}
+
+func (m *Xml) GetPrefix() string {
+	if m != nil {
+		return m.Prefix
+	}
+	return ""
+}
+
+func (m *Xml) GetAttribute() bool {
+	if m != nil {
+		return m.Attribute
+	}
+	return false
+}
+
+func (m *Xml) GetWrapped() bool {
+	if m != nil {
+		return m.Wrapped
+	}
+	return false
+}
+
+func (m *Xml) GetVendorExtension() []*NamedAny {
+	if m != nil {
+		return m.VendorExtension
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem")
+	proto.RegisterType((*Any)(nil), "openapi.v2.Any")
+	proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity")
+	proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity")
+	proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter")
+	proto.RegisterType((*Contact)(nil), "openapi.v2.Contact")
+	proto.RegisterType((*Default)(nil), "openapi.v2.Default")
+	proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions")
+	proto.RegisterType((*Document)(nil), "openapi.v2.Document")
+	proto.RegisterType((*Examples)(nil), "openapi.v2.Examples")
+	proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs")
+	proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema")
+	proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema")
+	proto.RegisterType((*Header)(nil), "openapi.v2.Header")
+	proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema")
+	proto.RegisterType((*Headers)(nil), "openapi.v2.Headers")
+	proto.RegisterType((*Info)(nil), "openapi.v2.Info")
+	proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem")
+	proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference")
+	proto.RegisterType((*License)(nil), "openapi.v2.License")
+	proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny")
+	proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader")
+	proto.RegisterType((*NamedParameter)(nil), "openapi.v2.NamedParameter")
+	proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem")
+	proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse")
+	proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue")
+	proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema")
+	proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem")
+	proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString")
+	proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray")
+	proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter")
+	proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity")
+	proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity")
+	proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity")
+	proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity")
+	proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes")
+	proto.RegisterType((*Operation)(nil), "openapi.v2.Operation")
+	proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter")
+	proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions")
+	proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem")
+	proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem")
+	proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema")
+	proto.RegisterType((*Paths)(nil), "openapi.v2.Paths")
+	proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems")
+	proto.RegisterType((*Properties)(nil), "openapi.v2.Properties")
+	proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema")
+	proto.RegisterType((*Response)(nil), "openapi.v2.Response")
+	proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions")
+	proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue")
+	proto.RegisterType((*Responses)(nil), "openapi.v2.Responses")
+	proto.RegisterType((*Schema)(nil), "openapi.v2.Schema")
+	proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem")
+	proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions")
+	proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem")
+	proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement")
+	proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray")
+	proto.RegisterType((*Tag)(nil), "openapi.v2.Tag")
+	proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem")
+	proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension")
+	proto.RegisterType((*Xml)(nil), "openapi.v2.Xml")
+}
+
+func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) }
+
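+// exampleDecodeDocument is an illustrative sketch (hand-written, not
+// generated from OpenAPIv2.proto): with the types registered above, a
+// wire-format openapi.v2 Document (a message generated earlier in this file)
+// is decoded with the standard proto.Unmarshal call.
+func exampleDecodeDocument(data []byte) (*Document, error) {
+	var doc Document
+	if err := proto.Unmarshal(data, &doc); err != nil {
+		return nil, err
+	}
+	return &doc, nil
+}
+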
+var fileDescriptor0 = []byte{
+	// 3129 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57,
+	0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c,
+	0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb,
+	0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a,
+	0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5,
+	0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d,
+	0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7,
+	0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7,
+	0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f,
+	0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c,
+	0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c,
+	0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc,
+	0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01,
+	0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2,
+	0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5,
+	0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23,
+	0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51,
+	0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2,
+	0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5,
+	0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c,
+	0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89,
+	0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f,
+	0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d,
+	0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c,
+	0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e,
+	0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1,
+	0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55,
+	0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87,
+	0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4,
+	0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30,
+	0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6,
+	0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17,
+	0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4,
+	0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74,
+	0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2,
+	0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4,
+	0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0,
+	0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa,
+	0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61,
+	0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e,
+	0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b,
+	0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0,
+	0x03, 0x5b, 0x79, 0x25, 0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7,
+	0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5,
+	0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e,
+	0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04,
+	0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d,
+	0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d,
+	0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13,
+	0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18,
+	0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9,
+	0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73,
+	0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a,
+	0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9,
+	0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8,
+	0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19,
+	0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9,
+	0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60,
+	0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29,
+	0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92,
+	0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc,
+	0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a,
+	0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe,
+	0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6,
+	0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8,
+	0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90,
+	0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf,
+	0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64,
+	0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0,
+	0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d,
+	0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18,
+	0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4,
+	0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58,
+	0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21,
+	0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d,
+	0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9,
+	0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67,
+	0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17,
+	0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27,
+	0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5,
+	0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a,
+	0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4,
+	0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e,
+	0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2,
+	0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87,
+	0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38,
+	0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93,
+	0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0,
+	0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b,
+	0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2,
+	0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86,
+	0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c,
+	0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02,
+	0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3,
+	0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3,
+	0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e,
+	0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7,
+	0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94,
+	0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34,
+	0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a,
+	0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8,
+	0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b,
+	0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5,
+	0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e,
+	0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89,
+	0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6,
+	0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c,
+	0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14,
+	0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41,
+	0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7,
+	0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d,
+	0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc,
+	0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7,
+	0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3,
+	0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 0x9a, 0x35, 0xd1, 0x69,
+	0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36,
+	0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e,
+	0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c,
+	0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab,
+	0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33,
+	0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07,
+	0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f,
+	0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f,
+	0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b,
+	0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc,
+	0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e,
+	0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad,
+	0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16,
+	0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89,
+	0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45,
+	0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8,
+	0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b,
+	0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08,
+	0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93,
+	0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7,
+	0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb,
+	0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74,
+	0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41,
+	0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20,
+	0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a,
+	0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94,
+	0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e,
+	0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4,
+	0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6,
+	0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36,
+	0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a,
+	0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58,
+	0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46,
+	0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88,
+	0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c,
+	0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81,
+	0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61,
+	0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4,
+	0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea,
+	0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf,
+	0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b,
+	0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0,
+	0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4,
+	0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5,
+	0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44,
+	0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f,
+	0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36,
+	0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd,
+	0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e,
+	0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb,
+	0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d,
+	0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77,
+	0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9,
+	0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5,
+	0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4,
+	0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6,
+	0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f,
+	0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb,
+	0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7,
+	0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66,
+	0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9,
+	0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d,
+	0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2,
+	0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c,
+	0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2,
+	0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62,
+	0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7,
+	0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1,
+	0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21,
+	0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e,
+	0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6,
+	0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe,
+	0xb1, 0x5b, 0xc6, 0xb3, 0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c,
+	0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae,
+	0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e,
+	0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1,
+	0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8,
+	0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47,
+	0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf,
+	0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff,
+	0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00,
+}
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
new file mode 100644
index 0000000..557c880
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto
@@ -0,0 +1,663 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// THIS FILE IS AUTOMATICALLY GENERATED.
+
+syntax = "proto3";
+
+package openapi.v2;
+
+import "google/protobuf/any.proto";
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and staying
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold the proto descriptor, so developers don't need
+// to work with it directly.
+option java_outer_classname = "OpenAPIProto";
+
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.openapi_v2";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+message AdditionalPropertiesItem {
+  oneof oneof {
+    Schema schema = 1;
+    bool boolean = 2;
+  }
+}
+
+message Any {
+  google.protobuf.Any value = 1;
+  string yaml = 2;
+}
+
+message ApiKeySecurity {
+  string type = 1;
+  string name = 2;
+  string in = 3;
+  string description = 4;
+  repeated NamedAny vendor_extension = 5;
+}
+
+message BasicAuthenticationSecurity {
+  string type = 1;
+  string description = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+message BodyParameter {
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 1;
+  // The name of the parameter.
+  string name = 2;
+  // Determines the location of the parameter.
+  string in = 3;
+  // Determines whether or not this parameter is required or optional.
+  bool required = 4;
+  Schema schema = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+// Contact information for the owners of the API.
+message Contact {
+  // The identifying name of the contact person/organization.
+  string name = 1;
+  // The URL pointing to the contact information.
+  string url = 2;
+  // The email address of the contact person/organization.
+  string email = 3;
+  repeated NamedAny vendor_extension = 4;
+}
+
+message Default {
+  repeated NamedAny additional_properties = 1;
+}
+
+// One or more JSON objects describing the schemas being consumed and produced by the API.
+message Definitions {
+  repeated NamedSchema additional_properties = 1;
+}
+
+message Document {
+  // The Swagger version of this document.
+  string swagger = 1;
+  Info info = 2;
+  // The host (name or ip) of the API. Example: 'swagger.io'
+  string host = 3;
+  // The base path to the API. Example: '/api'.
+  string base_path = 4;
+  // The transfer protocol of the API.
+  repeated string schemes = 5;
+  // A list of MIME types accepted by the API.
+  repeated string consumes = 6;
+  // A list of MIME types the API can produce.
+  repeated string produces = 7;
+  Paths paths = 8;
+  Definitions definitions = 9;
+  ParameterDefinitions parameters = 10;
+  ResponseDefinitions responses = 11;
+  repeated SecurityRequirement security = 12;
+  SecurityDefinitions security_definitions = 13;
+  repeated Tag tags = 14;
+  ExternalDocs external_docs = 15;
+  repeated NamedAny vendor_extension = 16;
+}
+
+message Examples {
+  repeated NamedAny additional_properties = 1;
+}
+
+// information about external documentation
+message ExternalDocs {
+  string description = 1;
+  string url = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+// A deterministic version of a JSON Schema object.
+message FileSchema {
+  string format = 1;
+  string title = 2;
+  string description = 3;
+  Any default = 4;
+  repeated string required = 5;
+  string type = 6;
+  bool read_only = 7;
+  ExternalDocs external_docs = 8;
+  Any example = 9;
+  repeated NamedAny vendor_extension = 10;
+}
+
+message FormDataParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  // allows sending a parameter by name only or with an empty value.
+  bool allow_empty_value = 5;
+  string type = 6;
+  string format = 7;
+  PrimitivesItems items = 8;
+  string collection_format = 9;
+  Any default = 10;
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  int64 max_length = 15;
+  int64 min_length = 16;
+  string pattern = 17;
+  int64 max_items = 18;
+  int64 min_items = 19;
+  bool unique_items = 20;
+  repeated Any enum = 21;
+  double multiple_of = 22;
+  repeated NamedAny vendor_extension = 23;
+}
+
+message Header {
+  string type = 1;
+  string format = 2;
+  PrimitivesItems items = 3;
+  string collection_format = 4;
+  Any default = 5;
+  double maximum = 6;
+  bool exclusive_maximum = 7;
+  double minimum = 8;
+  bool exclusive_minimum = 9;
+  int64 max_length = 10;
+  int64 min_length = 11;
+  string pattern = 12;
+  int64 max_items = 13;
+  int64 min_items = 14;
+  bool unique_items = 15;
+  repeated Any enum = 16;
+  double multiple_of = 17;
+  string description = 18;
+  repeated NamedAny vendor_extension = 19;
+}
+
+message HeaderParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  string type = 5;
+  string format = 6;
+  PrimitivesItems items = 7;
+  string collection_format = 8;
+  Any default = 9;
+  double maximum = 10;
+  bool exclusive_maximum = 11;
+  double minimum = 12;
+  bool exclusive_minimum = 13;
+  int64 max_length = 14;
+  int64 min_length = 15;
+  string pattern = 16;
+  int64 max_items = 17;
+  int64 min_items = 18;
+  bool unique_items = 19;
+  repeated Any enum = 20;
+  double multiple_of = 21;
+  repeated NamedAny vendor_extension = 22;
+}
+
+message Headers {
+  repeated NamedHeader additional_properties = 1;
+}
+
+// General information about the API.
+message Info {
+  // A unique and precise title of the API.
+  string title = 1;
+  // A semantic version number of the API.
+  string version = 2;
+  // A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The terms of service for the API.
+  string terms_of_service = 4;
+  Contact contact = 5;
+  License license = 6;
+  repeated NamedAny vendor_extension = 7;
+}
+
+message ItemsItem {
+  repeated Schema schema = 1;
+}
+
+message JsonReference {
+  string _ref = 1;
+  string description = 2;
+}
+
+message License {
+  // The name of the license type. It's encouraged to use an OSI compatible license.
+  string name = 1;
+  // The URL pointing to the license.
+  string url = 2;
+  repeated NamedAny vendor_extension = 3;
+}
+
+// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs.
+message NamedAny {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Any value = 2;
+}
+
+// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs.
+message NamedHeader {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Header value = 2;
+}
+
+// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs.
+message NamedParameter {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Parameter value = 2;
+}
+
+// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs.
+message NamedPathItem {
+  // Map key
+  string name = 1;
+  // Mapped value
+  PathItem value = 2;
+}
+
+// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs.
+message NamedResponse {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Response value = 2;
+}
+
+// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs.
+message NamedResponseValue {
+  // Map key
+  string name = 1;
+  // Mapped value
+  ResponseValue value = 2;
+}
+
+// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs.
+message NamedSchema {
+  // Map key
+  string name = 1;
+  // Mapped value
+  Schema value = 2;
+}
+
+// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs.
+message NamedSecurityDefinitionsItem {
+  // Map key
+  string name = 1;
+  // Mapped value
+  SecurityDefinitionsItem value = 2;
+}
+
+// Automatically-generated message used to represent maps of string as ordered (name,value) pairs.
+message NamedString {
+  // Map key
+  string name = 1;
+  // Mapped value
+  string value = 2;
+}
+
+// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs.
+message NamedStringArray {
+  // Map key
+  string name = 1;
+  // Mapped value
+  StringArray value = 2;
+}
+
+message NonBodyParameter {
+  oneof oneof {
+    HeaderParameterSubSchema header_parameter_sub_schema = 1;
+    FormDataParameterSubSchema form_data_parameter_sub_schema = 2;
+    QueryParameterSubSchema query_parameter_sub_schema = 3;
+    PathParameterSubSchema path_parameter_sub_schema = 4;
+  }
+}
+
+message Oauth2AccessCodeSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string authorization_url = 4;
+  string token_url = 5;
+  string description = 6;
+  repeated NamedAny vendor_extension = 7;
+}
+
+message Oauth2ApplicationSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string token_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2ImplicitSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string authorization_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2PasswordSecurity {
+  string type = 1;
+  string flow = 2;
+  Oauth2Scopes scopes = 3;
+  string token_url = 4;
+  string description = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
+message Oauth2Scopes {
+  repeated NamedString additional_properties = 1;
+}
+
+message Operation {
+  repeated string tags = 1;
+  // A brief summary of the operation.
+  string summary = 2;
+  // A longer description of the operation, GitHub Flavored Markdown is allowed.
+  string description = 3;
+  ExternalDocs external_docs = 4;
+  // A unique identifier of the operation.
+  string operation_id = 5;
+  // A list of MIME types the API can produce.
+  repeated string produces = 6;
+  // A list of MIME types the API can consume.
+  repeated string consumes = 7;
+  // The parameters needed to send a valid API call.
+  repeated ParametersItem parameters = 8;
+  Responses responses = 9;
+  // The transfer protocol of the API.
+  repeated string schemes = 10;
+  bool deprecated = 11;
+  repeated SecurityRequirement security = 12;
+  repeated NamedAny vendor_extension = 13;
+}
+
+message Parameter {
+  oneof oneof {
+    BodyParameter body_parameter = 1;
+    NonBodyParameter non_body_parameter = 2;
+  }
+}
+
+// One or more JSON representations for parameters
+message ParameterDefinitions {
+  repeated NamedParameter additional_properties = 1;
+}
+
+message ParametersItem {
+  oneof oneof {
+    Parameter parameter = 1;
+    JsonReference json_reference = 2;
+  }
+}
+
+message PathItem {
+  string _ref = 1;
+  Operation get = 2;
+  Operation put = 3;
+  Operation post = 4;
+  Operation delete = 5;
+  Operation options = 6;
+  Operation head = 7;
+  Operation patch = 8;
+  // The parameters needed to send a valid API call.
+  repeated ParametersItem parameters = 9;
+  repeated NamedAny vendor_extension = 10;
+}
+
+message PathParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  string type = 5;
+  string format = 6;
+  PrimitivesItems items = 7;
+  string collection_format = 8;
+  Any default = 9;
+  double maximum = 10;
+  bool exclusive_maximum = 11;
+  double minimum = 12;
+  bool exclusive_minimum = 13;
+  int64 max_length = 14;
+  int64 min_length = 15;
+  string pattern = 16;
+  int64 max_items = 17;
+  int64 min_items = 18;
+  bool unique_items = 19;
+  repeated Any enum = 20;
+  double multiple_of = 21;
+  repeated NamedAny vendor_extension = 22;
+}
+
+// Relative paths to the individual endpoints. They must be relative to the 'basePath'.
+message Paths {
+  repeated NamedAny vendor_extension = 1;
+  repeated NamedPathItem path = 2;
+}
+
+message PrimitivesItems {
+  string type = 1;
+  string format = 2;
+  PrimitivesItems items = 3;
+  string collection_format = 4;
+  Any default = 5;
+  double maximum = 6;
+  bool exclusive_maximum = 7;
+  double minimum = 8;
+  bool exclusive_minimum = 9;
+  int64 max_length = 10;
+  int64 min_length = 11;
+  string pattern = 12;
+  int64 max_items = 13;
+  int64 min_items = 14;
+  bool unique_items = 15;
+  repeated Any enum = 16;
+  double multiple_of = 17;
+  repeated NamedAny vendor_extension = 18;
+}
+
+message Properties {
+  repeated NamedSchema additional_properties = 1;
+}
+
+message QueryParameterSubSchema {
+  // Determines whether or not this parameter is required or optional.
+  bool required = 1;
+  // Determines the location of the parameter.
+  string in = 2;
+  // A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed.
+  string description = 3;
+  // The name of the parameter.
+  string name = 4;
+  // allows sending a parameter by name only or with an empty value.
+  bool allow_empty_value = 5;
+  string type = 6;
+  string format = 7;
+  PrimitivesItems items = 8;
+  string collection_format = 9;
+  Any default = 10;
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  int64 max_length = 15;
+  int64 min_length = 16;
+  string pattern = 17;
+  int64 max_items = 18;
+  int64 min_items = 19;
+  bool unique_items = 20;
+  repeated Any enum = 21;
+  double multiple_of = 22;
+  repeated NamedAny vendor_extension = 23;
+}
+
+message Response {
+  string description = 1;
+  SchemaItem schema = 2;
+  Headers headers = 3;
+  Examples examples = 4;
+  repeated NamedAny vendor_extension = 5;
+}
+
+// One or more JSON representations for responses
+message ResponseDefinitions {
+  repeated NamedResponse additional_properties = 1;
+}
+
+message ResponseValue {
+  oneof oneof {
+    Response response = 1;
+    JsonReference json_reference = 2;
+  }
+}
+
+// Response objects names can either be any valid HTTP status code or 'default'.
+message Responses {
+  repeated NamedResponseValue response_code = 1;
+  repeated NamedAny vendor_extension = 2;
+}
+
+// A deterministic version of a JSON Schema object.
+message Schema {
+  string _ref = 1;
+  string format = 2;
+  string title = 3;
+  string description = 4;
+  Any default = 5;
+  double multiple_of = 6;
+  double maximum = 7;
+  bool exclusive_maximum = 8;
+  double minimum = 9;
+  bool exclusive_minimum = 10;
+  int64 max_length = 11;
+  int64 min_length = 12;
+  string pattern = 13;
+  int64 max_items = 14;
+  int64 min_items = 15;
+  bool unique_items = 16;
+  int64 max_properties = 17;
+  int64 min_properties = 18;
+  repeated string required = 19;
+  repeated Any enum = 20;
+  AdditionalPropertiesItem additional_properties = 21;
+  TypeItem type = 22;
+  ItemsItem items = 23;
+  repeated Schema all_of = 24;
+  Properties properties = 25;
+  string discriminator = 26;
+  bool read_only = 27;
+  Xml xml = 28;
+  ExternalDocs external_docs = 29;
+  Any example = 30;
+  repeated NamedAny vendor_extension = 31;
+}
+
+message SchemaItem {
+  oneof oneof {
+    Schema schema = 1;
+    FileSchema file_schema = 2;
+  }
+}
+
+message SecurityDefinitions {
+  repeated NamedSecurityDefinitionsItem additional_properties = 1;
+}
+
+message SecurityDefinitionsItem {
+  oneof oneof {
+    BasicAuthenticationSecurity basic_authentication_security = 1;
+    ApiKeySecurity api_key_security = 2;
+    Oauth2ImplicitSecurity oauth2_implicit_security = 3;
+    Oauth2PasswordSecurity oauth2_password_security = 4;
+    Oauth2ApplicationSecurity oauth2_application_security = 5;
+    Oauth2AccessCodeSecurity oauth2_access_code_security = 6;
+  }
+}
+
+message SecurityRequirement {
+  repeated NamedStringArray additional_properties = 1;
+}
+
+message StringArray {
+  repeated string value = 1;
+}
+
+message Tag {
+  string name = 1;
+  string description = 2;
+  ExternalDocs external_docs = 3;
+  repeated NamedAny vendor_extension = 4;
+}
+
+message TypeItem {
+  repeated string value = 1;
+}
+
+// Any property starting with x- is valid.
+message VendorExtension {
+  repeated NamedAny additional_properties = 1;
+}
+
+message Xml {
+  string name = 1;
+  string namespace = 2;
+  string prefix = 3;
+  bool attribute = 4;
+  bool wrapped = 5;
+  repeated NamedAny vendor_extension = 6;
+}
+
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
new file mode 100644
index 0000000..836fb32
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md
@@ -0,0 +1,16 @@
+# OpenAPI v2 Protocol Buffer Models
+
+This directory contains a Protocol Buffer-language model
+and related code for supporting OpenAPI v2.
+
+Gnostic applications and plugins can use OpenAPIv2.proto
+to generate Protocol Buffer support code for their preferred languages.
+
+OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI
+descriptions into the Protocol Buffer-based data structures
+generated from OpenAPIv2.proto.
+
+OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic 
+compiler generator, and OpenAPIv2.pb.go is generated by 
+protoc, the Protocol Buffer compiler, and protoc-gen-go, the
+Protocol Buffer Go code generation plugin.
diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
new file mode 100644
index 0000000..2815a26
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json
@@ -0,0 +1,1610 @@
+{
+  "title": "A JSON Schema for Swagger 2.0 API.",
+  "id": "http://swagger.io/v2/schema.json#",
+  "$schema": "http://json-schema.org/draft-04/schema#",
+  "type": "object",
+  "required": [
+    "swagger",
+    "info",
+    "paths"
+  ],
+  "additionalProperties": false,
+  "patternProperties": {
+    "^x-": {
+      "$ref": "#/definitions/vendorExtension"
+    }
+  },
+  "properties": {
+    "swagger": {
+      "type": "string",
+      "enum": [
+        "2.0"
+      ],
+      "description": "The Swagger version of this document."
+    },
+    "info": {
+      "$ref": "#/definitions/info"
+    },
+    "host": {
+      "type": "string",
+      "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+      "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+    },
+    "basePath": {
+      "type": "string",
+      "pattern": "^/",
+      "description": "The base path to the API. Example: '/api'."
+    },
+    "schemes": {
+      "$ref": "#/definitions/schemesList"
+    },
+    "consumes": {
+      "description": "A list of MIME types accepted by the API.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "produces": {
+      "description": "A list of MIME types the API can produce.",
+      "allOf": [
+        {
+          "$ref": "#/definitions/mediaTypeList"
+        }
+      ]
+    },
+    "paths": {
+      "$ref": "#/definitions/paths"
+    },
+    "definitions": {
+      "$ref": "#/definitions/definitions"
+    },
+    "parameters": {
+      "$ref": "#/definitions/parameterDefinitions"
+    },
+    "responses": {
+      "$ref": "#/definitions/responseDefinitions"
+    },
+    "security": {
+      "$ref": "#/definitions/security"
+    },
+    "securityDefinitions": {
+      "$ref": "#/definitions/securityDefinitions"
+    },
+    "tags": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/tag"
+      },
+      "uniqueItems": true
+    },
+    "externalDocs": {
+      "$ref": "#/definitions/externalDocs"
+    }
+  },
+  "definitions": {
+    "info": {
+      "type": "object",
+      "description": "General information about the API.",
+      "required": [
+        "version",
+        "title"
+      ],
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "title": {
+          "type": "string",
+          "description": "A unique and precise title of the API."
+        },
+        "version": {
+          "type": "string",
+          "description": "A semantic version number of the API."
+        },
+        "description": {
+          "type": "string",
+          "description": "A longer description of the API. Should be different from the title.  GitHub Flavored Markdown is allowed."
+        },
+        "termsOfService": {
+          "type": "string",
+          "description": "The terms of service for the API."
+        },
+        "contact": {
+          "$ref": "#/definitions/contact"
+        },
+        "license": {
+          "$ref": "#/definitions/license"
+        }
+      }
+    },
+    "contact": {
+      "type": "object",
+      "description": "Contact information for the owners of the API.",
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The identifying name of the contact person/organization."
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL pointing to the contact information.",
+          "format": "uri"
+        },
+        "email": {
+          "type": "string",
+          "description": "The email address of the contact person/organization.",
+          "format": "email"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "license": {
+      "type": "object",
+      "required": [
+        "name"
+      ],
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL pointing to the license.",
+          "format": "uri"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "paths": {
+      "type": "object",
+      "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        },
+        "^/": {
+          "$ref": "#/definitions/pathItem"
+        }
+      },
+      "additionalProperties": false
+    },
+    "definitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/schema"
+      },
+      "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+    },
+    "parameterDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/parameter"
+      },
+      "description": "One or more JSON representations for parameters"
+    },
+    "responseDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/response"
+      },
+      "description": "One or more JSON representations for parameters"
+    },
+    "externalDocs": {
+      "type": "object",
+      "additionalProperties": false,
+      "description": "information about external documentation",
+      "required": [
+        "url"
+      ],
+      "properties": {
+        "description": {
+          "type": "string"
+        },
+        "url": {
+          "type": "string",
+          "format": "uri"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "examples": {
+      "type": "object",
+      "additionalProperties": true
+    },
+    "mimeType": {
+      "type": "string",
+      "description": "The MIME type of the HTTP message."
+    },
+    "operation": {
+      "type": "object",
+      "required": [
+        "responses"
+      ],
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "tags": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          },
+          "uniqueItems": true
+        },
+        "summary": {
+          "type": "string",
+          "description": "A brief summary of the operation."
+        },
+        "description": {
+          "type": "string",
+          "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "operationId": {
+          "type": "string",
+          "description": "A unique identifier of the operation."
+        },
+        "produces": {
+          "description": "A list of MIME types the API can produce.",
+          "allOf": [
+            {
+              "$ref": "#/definitions/mediaTypeList"
+            }
+          ]
+        },
+        "consumes": {
+          "description": "A list of MIME types the API can consume.",
+          "allOf": [
+            {
+              "$ref": "#/definitions/mediaTypeList"
+            }
+          ]
+        },
+        "parameters": {
+          "$ref": "#/definitions/parametersList"
+        },
+        "responses": {
+          "$ref": "#/definitions/responses"
+        },
+        "schemes": {
+          "$ref": "#/definitions/schemesList"
+        },
+        "deprecated": {
+          "type": "boolean",
+          "default": false
+        },
+        "security": {
+          "$ref": "#/definitions/security"
+        }
+      }
+    },
+    "pathItem": {
+      "type": "object",
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "get": {
+          "$ref": "#/definitions/operation"
+        },
+        "put": {
+          "$ref": "#/definitions/operation"
+        },
+        "post": {
+          "$ref": "#/definitions/operation"
+        },
+        "delete": {
+          "$ref": "#/definitions/operation"
+        },
+        "options": {
+          "$ref": "#/definitions/operation"
+        },
+        "head": {
+          "$ref": "#/definitions/operation"
+        },
+        "patch": {
+          "$ref": "#/definitions/operation"
+        },
+        "parameters": {
+          "$ref": "#/definitions/parametersList"
+        }
+      }
+    },
+    "responses": {
+      "type": "object",
+      "description": "Response objects names can either be any valid HTTP status code or 'default'.",
+      "minProperties": 1,
+      "additionalProperties": false,
+      "patternProperties": {
+        "^([0-9]{3})$|^(default)$": {
+          "$ref": "#/definitions/responseValue"
+        },
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "not": {
+        "type": "object",
+        "additionalProperties": false,
+        "patternProperties": {
+          "^x-": {
+            "$ref": "#/definitions/vendorExtension"
+          }
+        }
+      }
+    },
+    "responseValue": {
+      "oneOf": [
+        {
+          "$ref": "#/definitions/response"
+        },
+        {
+          "$ref": "#/definitions/jsonReference"
+        }
+      ]
+    },
+    "response": {
+      "type": "object",
+      "required": [
+        "description"
+      ],
+      "properties": {
+        "description": {
+          "type": "string"
+        },
+        "schema": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "$ref": "#/definitions/fileSchema"
+            }
+          ]
+        },
+        "headers": {
+          "$ref": "#/definitions/headers"
+        },
+        "examples": {
+          "$ref": "#/definitions/examples"
+        }
+      },
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "headers": {
+      "type": "object",
+      "additionalProperties": {
+        "$ref": "#/definitions/header"
+      }
+    },
+    "header": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "integer",
+            "boolean",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "vendorExtension": {
+      "description": "Any property starting with x- is valid.",
+      "additionalProperties": true,
+      "additionalItems": true
+    },
+    "bodyParameter": {
+      "type": "object",
+      "required": [
+        "name",
+        "in",
+        "schema"
+      ],
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "body"
+          ]
+        },
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "schema": {
+          "$ref": "#/definitions/schema"
+        }
+      },
+      "additionalProperties": false
+    },
+    "headerParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "header"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "queryParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "query"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "allowEmptyValue": {
+          "type": "boolean",
+          "default": false,
+          "description": "allows sending a parameter by name only or with an empty value."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormatWithMulti"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "formDataParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "description": "Determines whether or not this parameter is required or optional.",
+          "default": false
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "formData"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "allowEmptyValue": {
+          "type": "boolean",
+          "default": false,
+          "description": "allows sending a parameter by name only or with an empty value."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array",
+            "file"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormatWithMulti"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "pathParameterSubSchema": {
+      "additionalProperties": false,
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "required": [
+        "required"
+      ],
+      "properties": {
+        "required": {
+          "type": "boolean",
+          "enum": [
+            true
+          ],
+          "description": "Determines whether or not this parameter is required or optional."
+        },
+        "in": {
+          "type": "string",
+          "description": "Determines the location of the parameter.",
+          "enum": [
+            "path"
+          ]
+        },
+        "description": {
+          "type": "string",
+          "description": "A brief description of the parameter. This could contain examples of use.  GitHub Flavored Markdown is allowed."
+        },
+        "name": {
+          "type": "string",
+          "description": "The name of the parameter."
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "boolean",
+            "integer",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      }
+    },
+    "nonBodyParameter": {
+      "type": "object",
+      "required": [
+        "name",
+        "in",
+        "type"
+      ],
+      "oneOf": [
+        {
+          "$ref": "#/definitions/headerParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/formDataParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/queryParameterSubSchema"
+        },
+        {
+          "$ref": "#/definitions/pathParameterSubSchema"
+        }
+      ]
+    },
+    "parameter": {
+      "oneOf": [
+        {
+          "$ref": "#/definitions/bodyParameter"
+        },
+        {
+          "$ref": "#/definitions/nonBodyParameter"
+        }
+      ]
+    },
+    "schema": {
+      "type": "object",
+      "description": "A deterministic version of a JSON Schema object.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "format": {
+          "type": "string"
+        },
+        "title": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+        },
+        "description": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+        },
+        "default": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+        },
+        "multipleOf": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+        },
+        "maximum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minLength": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "pattern": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+        },
+        "maxItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "uniqueItems": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+        },
+        "maxProperties": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+        },
+        "minProperties": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+        },
+        "required": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+        },
+        "enum": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+        },
+        "additionalProperties": {
+          "oneOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "type": "boolean"
+            }
+          ],
+          "default": {}
+        },
+        "type": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+        },
+        "items": {
+          "anyOf": [
+            {
+              "$ref": "#/definitions/schema"
+            },
+            {
+              "type": "array",
+              "minItems": 1,
+              "items": {
+                "$ref": "#/definitions/schema"
+              }
+            }
+          ],
+          "default": {}
+        },
+        "allOf": {
+          "type": "array",
+          "minItems": 1,
+          "items": {
+            "$ref": "#/definitions/schema"
+          }
+        },
+        "properties": {
+          "type": "object",
+          "additionalProperties": {
+            "$ref": "#/definitions/schema"
+          },
+          "default": {}
+        },
+        "discriminator": {
+          "type": "string"
+        },
+        "readOnly": {
+          "type": "boolean",
+          "default": false
+        },
+        "xml": {
+          "$ref": "#/definitions/xml"
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "example": {}
+      },
+      "additionalProperties": false
+    },
+    "fileSchema": {
+      "type": "object",
+      "description": "A deterministic version of a JSON Schema object.",
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      },
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "format": {
+          "type": "string"
+        },
+        "title": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+        },
+        "description": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+        },
+        "default": {
+          "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+        },
+        "required": {
+          "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+        },
+        "type": {
+          "type": "string",
+          "enum": [
+            "file"
+          ]
+        },
+        "readOnly": {
+          "type": "boolean",
+          "default": false
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        },
+        "example": {}
+      },
+      "additionalProperties": false
+    },
+    "primitivesItems": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "string",
+            "number",
+            "integer",
+            "boolean",
+            "array"
+          ]
+        },
+        "format": {
+          "type": "string"
+        },
+        "items": {
+          "$ref": "#/definitions/primitivesItems"
+        },
+        "collectionFormat": {
+          "$ref": "#/definitions/collectionFormat"
+        },
+        "default": {
+          "$ref": "#/definitions/default"
+        },
+        "maximum": {
+          "$ref": "#/definitions/maximum"
+        },
+        "exclusiveMaximum": {
+          "$ref": "#/definitions/exclusiveMaximum"
+        },
+        "minimum": {
+          "$ref": "#/definitions/minimum"
+        },
+        "exclusiveMinimum": {
+          "$ref": "#/definitions/exclusiveMinimum"
+        },
+        "maxLength": {
+          "$ref": "#/definitions/maxLength"
+        },
+        "minLength": {
+          "$ref": "#/definitions/minLength"
+        },
+        "pattern": {
+          "$ref": "#/definitions/pattern"
+        },
+        "maxItems": {
+          "$ref": "#/definitions/maxItems"
+        },
+        "minItems": {
+          "$ref": "#/definitions/minItems"
+        },
+        "uniqueItems": {
+          "$ref": "#/definitions/uniqueItems"
+        },
+        "enum": {
+          "$ref": "#/definitions/enum"
+        },
+        "multipleOf": {
+          "$ref": "#/definitions/multipleOf"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "security": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/securityRequirement"
+      },
+      "uniqueItems": true
+    },
+    "securityRequirement": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "array",
+        "items": {
+          "type": "string"
+        },
+        "uniqueItems": true
+      }
+    },
+    "xml": {
+      "type": "object",
+      "additionalProperties": false,
+      "properties": {
+        "name": {
+          "type": "string"
+        },
+        "namespace": {
+          "type": "string"
+        },
+        "prefix": {
+          "type": "string"
+        },
+        "attribute": {
+          "type": "boolean",
+          "default": false
+        },
+        "wrapped": {
+          "type": "boolean",
+          "default": false
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "tag": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "name"
+      ],
+      "properties": {
+        "name": {
+          "type": "string"
+        },
+        "description": {
+          "type": "string"
+        },
+        "externalDocs": {
+          "$ref": "#/definitions/externalDocs"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "securityDefinitions": {
+      "type": "object",
+      "additionalProperties": {
+        "oneOf": [
+          {
+            "$ref": "#/definitions/basicAuthenticationSecurity"
+          },
+          {
+            "$ref": "#/definitions/apiKeySecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2ImplicitSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2PasswordSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2ApplicationSecurity"
+          },
+          {
+            "$ref": "#/definitions/oauth2AccessCodeSecurity"
+          }
+        ]
+      }
+    },
+    "basicAuthenticationSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "basic"
+          ]
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "apiKeySecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "name",
+        "in"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "apiKey"
+          ]
+        },
+        "name": {
+          "type": "string"
+        },
+        "in": {
+          "type": "string",
+          "enum": [
+            "header",
+            "query"
+          ]
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2ImplicitSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "authorizationUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "implicit"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "authorizationUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2PasswordSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "password"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2ApplicationSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "application"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2AccessCodeSecurity": {
+      "type": "object",
+      "additionalProperties": false,
+      "required": [
+        "type",
+        "flow",
+        "authorizationUrl",
+        "tokenUrl"
+      ],
+      "properties": {
+        "type": {
+          "type": "string",
+          "enum": [
+            "oauth2"
+          ]
+        },
+        "flow": {
+          "type": "string",
+          "enum": [
+            "accessCode"
+          ]
+        },
+        "scopes": {
+          "$ref": "#/definitions/oauth2Scopes"
+        },
+        "authorizationUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "tokenUrl": {
+          "type": "string",
+          "format": "uri"
+        },
+        "description": {
+          "type": "string"
+        }
+      },
+      "patternProperties": {
+        "^x-": {
+          "$ref": "#/definitions/vendorExtension"
+        }
+      }
+    },
+    "oauth2Scopes": {
+      "type": "object",
+      "additionalProperties": {
+        "type": "string"
+      }
+    },
+    "mediaTypeList": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/mimeType"
+      },
+      "uniqueItems": true
+    },
+    "parametersList": {
+      "type": "array",
+      "description": "The parameters needed to send a valid API call.",
+      "additionalItems": false,
+      "items": {
+        "oneOf": [
+          {
+            "$ref": "#/definitions/parameter"
+          },
+          {
+            "$ref": "#/definitions/jsonReference"
+          }
+        ]
+      },
+      "uniqueItems": true
+    },
+    "schemesList": {
+      "type": "array",
+      "description": "The transfer protocol of the API.",
+      "items": {
+        "type": "string",
+        "enum": [
+          "http",
+          "https",
+          "ws",
+          "wss"
+        ]
+      },
+      "uniqueItems": true
+    },
+    "collectionFormat": {
+      "type": "string",
+      "enum": [
+        "csv",
+        "ssv",
+        "tsv",
+        "pipes"
+      ],
+      "default": "csv"
+    },
+    "collectionFormatWithMulti": {
+      "type": "string",
+      "enum": [
+        "csv",
+        "ssv",
+        "tsv",
+        "pipes",
+        "multi"
+      ],
+      "default": "csv"
+    },
+    "title": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+    },
+    "description": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+    },
+    "default": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+    },
+    "multipleOf": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+    },
+    "maximum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+    },
+    "exclusiveMaximum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+    },
+    "minimum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+    },
+    "exclusiveMinimum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+    },
+    "maxLength": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+    },
+    "minLength": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+    },
+    "pattern": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+    },
+    "maxItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+    },
+    "minItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+    },
+    "uniqueItems": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+    },
+    "enum": {
+      "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+    },
+    "jsonReference": {
+      "type": "object",
+      "required": [
+        "$ref"
+      ],
+      "additionalProperties": false,
+      "properties": {
+        "$ref": {
+          "type": "string"
+        },
+        "description": {
+          "type": "string"
+        }
+      }
+    }
+  }
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md
new file mode 100644
index 0000000..848b16c
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/README.md
@@ -0,0 +1,3 @@
+# Compiler support code
+
+This directory contains compiler support code used by Gnostic and Gnostic extensions.
\ No newline at end of file
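The helpers in compiler/helpers.go (vendored below) are usually called from generated code, but a minimal hand-written sketch shows the shape of the API; the sample YAML document and the `package main` wrapper are illustrative assumptions, not part of the vendored import.

```go
package main

import (
	"fmt"

	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Unmarshal a small YAML document into a yaml.MapSlice, the ordered
	// map type that the compiler helpers operate on.
	var doc yaml.MapSlice
	if err := yaml.Unmarshal([]byte("swagger: \"2.0\"\ninfo:\n  title: Example"), &doc); err != nil {
		panic(err)
	}

	// UnpackMap, MapHasKey, and MapValueForKey are defined in
	// compiler/helpers.go (vendored below).
	if m, ok := compiler.UnpackMap(doc); ok {
		fmt.Println(compiler.MapHasKey(m, "swagger"))      // true
		fmt.Println(compiler.MapValueForKey(m, "swagger")) // 2.0
	}
}
```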
diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go
new file mode 100644
index 0000000..a64c1b7
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/context.go
@@ -0,0 +1,43 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+// Context contains state of the compiler as it traverses a document.
+type Context struct {
+	Parent            *Context
+	Name              string
+	ExtensionHandlers *[]ExtensionHandler
+}
+
+// NewContextWithExtensions returns a new object representing the compiler state
+func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context {
+	return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers}
+}
+
+// NewContext returns a new object representing the compiler state
+func NewContext(name string, parent *Context) *Context {
+	if parent != nil {
+		return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers}
+	}
+	return &Context{Name: name, Parent: parent, ExtensionHandlers: nil}
+}
+
+// Description returns a text description of the compiler state
+func (context *Context) Description() string {
+	if context.Parent != nil {
+		return context.Parent.Description() + "." + context.Name
+	}
+	return context.Name
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go
new file mode 100644
index 0000000..d8672c1
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/error.go
@@ -0,0 +1,61 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+// Error represents compiler errors and their location in the document.
+type Error struct {
+	Context *Context
+	Message string
+}
+
+// NewError creates an Error.
+func NewError(context *Context, message string) *Error {
+	return &Error{Context: context, Message: message}
+}
+
+// Error returns the string value of an Error.
+func (err *Error) Error() string {
+	if err.Context == nil {
+		return "ERROR " + err.Message
+	}
+	return "ERROR " + err.Context.Description() + " " + err.Message
+}
+
+// ErrorGroup is a container for groups of Error values.
+type ErrorGroup struct {
+	Errors []error
+}
+
+// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty.
+func NewErrorGroupOrNil(errors []error) error {
+	if len(errors) == 0 {
+		return nil
+	} else if len(errors) == 1 {
+		return errors[0]
+	} else {
+		return &ErrorGroup{Errors: errors}
+	}
+}
+
+func (group *ErrorGroup) Error() string {
+	result := ""
+	for i, err := range group.Errors {
+		if i > 0 {
+			result += "\n"
+		}
+		result += err.Error()
+	}
+	return result
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
new file mode 100644
index 0000000..1f85b65
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go
@@ -0,0 +1,101 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+
+	"strings"
+
+	"errors"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	ext_plugin "github.com/googleapis/gnostic/extensions"
+	yaml "gopkg.in/yaml.v2"
+)
+
+// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions.
+type ExtensionHandler struct {
+	Name string
+}
+
+// HandleExtension calls a binary extension handler.
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) {
+	handled := false
+	var errFromPlugin error
+	var outFromPlugin *any.Any
+
+	if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 {
+		for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) {
+			outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName)
+			if outFromPlugin == nil {
+				continue
+			} else {
+				handled = true
+				break
+			}
+		}
+	}
+	return handled, outFromPlugin, errFromPlugin
+}
+
+func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) {
+	if extensionHandlers.Name != "" {
+		binary, _ := yaml.Marshal(in)
+
+		request := &ext_plugin.ExtensionHandlerRequest{}
+
+		version := &ext_plugin.Version{}
+		version.Major = 0
+		version.Minor = 1
+		version.Patch = 0
+		request.CompilerVersion = version
+
+		request.Wrapper = &ext_plugin.Wrapper{}
+
+		request.Wrapper.Version = "v2"
+		request.Wrapper.Yaml = string(binary)
+		request.Wrapper.ExtensionName = extensionName
+
+		requestBytes, _ := proto.Marshal(request)
+		cmd := exec.Command(extensionHandlers.Name)
+		cmd.Stdin = bytes.NewReader(requestBytes)
+		output, err := cmd.Output()
+
+		if err != nil {
+			fmt.Printf("Error: %+v\n", err)
+			return nil, err
+		}
+		response := &ext_plugin.ExtensionHandlerResponse{}
+		err = proto.Unmarshal(output, response)
+		if err != nil {
+			fmt.Printf("Error: %+v\n", err)
+			fmt.Printf("%s\n", string(output))
+			return nil, err
+		}
+		if !response.Handled {
+			return nil, nil
+		}
+		if len(response.Error) != 0 {
+			message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ","))
+			return nil, errors.New(message)
+		}
+		return response.Value, nil
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
new file mode 100644
index 0000000..76df635
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go
@@ -0,0 +1,197 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"fmt"
+	"gopkg.in/yaml.v2"
+	"regexp"
+	"sort"
+	"strconv"
+)
+
+// compiler helper functions, usually called from generated code
+
+// UnpackMap gets a yaml.MapSlice if possible.
+func UnpackMap(in interface{}) (yaml.MapSlice, bool) {
+	m, ok := in.(yaml.MapSlice)
+	if ok {
+		return m, true
+	}
+	// do we have an empty array?
+	a, ok := in.([]interface{})
+	if ok && len(a) == 0 {
+		// if so, return an empty map
+		return yaml.MapSlice{}, true
+	}
+	return nil, false
+}
+
+// SortedKeysForMap returns the sorted keys of a yaml.MapSlice.
+func SortedKeysForMap(m yaml.MapSlice) []string {
+	keys := make([]string, 0)
+	for _, item := range m {
+		keys = append(keys, item.Key.(string))
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// MapHasKey returns true if a yaml.MapSlice contains a specified key.
+func MapHasKey(m yaml.MapSlice, key string) bool {
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok && key == itemKey {
+			return true
+		}
+	}
+	return false
+}
+
+// MapValueForKey returns the value in a yaml.MapSlice for a specified key.
+func MapValueForKey(m yaml.MapSlice, key string) interface{} {
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok && key == itemKey {
+			return item.Value
+		}
+	}
+	return nil
+}
+
+// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible.
+func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string {
+	stringArray := make([]string, 0)
+	for _, item := range interfaceArray {
+		v, ok := item.(string)
+		if ok {
+			stringArray = append(stringArray, v)
+		}
+	}
+	return stringArray
+}
+
+// MissingKeysInMap identifies which keys from a list of required keys are not in a map.
+func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string {
+	missingKeys := make([]string, 0)
+	for _, k := range requiredKeys {
+		if !MapHasKey(m, k) {
+			missingKeys = append(missingKeys, k)
+		}
+	}
+	return missingKeys
+}
+
+// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns.
+func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string {
+	invalidKeys := make([]string, 0)
+	for _, item := range m {
+		itemKey, ok := item.Key.(string)
+		if ok {
+			key := itemKey
+			found := false
+			// does the key match an allowed key?
+			for _, allowedKey := range allowedKeys {
+				if key == allowedKey {
+					found = true
+					break
+				}
+			}
+			if !found {
+				// does the key match an allowed pattern?
+				for _, allowedPattern := range allowedPatterns {
+					if allowedPattern.MatchString(key) {
+						found = true
+						break
+					}
+				}
+				if !found {
+					invalidKeys = append(invalidKeys, key)
+				}
+			}
+		}
+	}
+	return invalidKeys
+}
+
+// DescribeMap describes a map (for debugging purposes).
+func DescribeMap(in interface{}, indent string) string {
+	description := ""
+	m, ok := in.(map[string]interface{})
+	if ok {
+		keys := make([]string, 0)
+		for k := range m {
+			keys = append(keys, k)
+		}
+		sort.Strings(keys)
+		for _, k := range keys {
+			v := m[k]
+			description += fmt.Sprintf("%s%s:\n", indent, k)
+			description += DescribeMap(v, indent+"  ")
+		}
+		return description
+	}
+	a, ok := in.([]interface{})
+	if ok {
+		for i, v := range a {
+			description += fmt.Sprintf("%s%d:\n", indent, i)
+			description += DescribeMap(v, indent+"  ")
+		}
+		return description
+	}
+	description += fmt.Sprintf("%s%+v\n", indent, in)
+	return description
+}
+
+// PluralProperties returns the string "properties" pluralized.
+func PluralProperties(count int) string {
+	if count == 1 {
+		return "property"
+	}
+	return "properties"
+}
+
+// StringArrayContainsValue returns true if a string array contains a specified value.
+func StringArrayContainsValue(array []string, value string) bool {
+	for _, item := range array {
+		if item == value {
+			return true
+		}
+	}
+	return false
+}
+
+// StringArrayContainsValues returns true if a string array contains all of a list of specified values.
+func StringArrayContainsValues(array []string, values []string) bool {
+	for _, value := range values {
+		if !StringArrayContainsValue(array, value) {
+			return false
+		}
+	}
+	return true
+}
+
+// StringValue returns the string value of an item.
+func StringValue(item interface{}) (value string, ok bool) {
+	value, ok = item.(string)
+	if ok {
+		return value, ok
+	}
+	intValue, ok := item.(int)
+	if ok {
+		return strconv.Itoa(intValue), true
+	}
+	return "", false
+}
diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go
new file mode 100644
index 0000000..9713a21
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/main.go
@@ -0,0 +1,16 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package compiler provides support functions to generated compiler code.
+package compiler
diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go
new file mode 100644
index 0000000..af10e0d
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go
@@ -0,0 +1,209 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package compiler
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	yaml "gopkg.in/yaml.v2"
+)
+
+var fileCache map[string][]byte
+var infoCache map[string]interface{}
+var count int64
+
+var verboseReader = false
+var fileCacheEnable = true
+var infoCacheEnable = true
+
+func initializeFileCache() {
+	if fileCache == nil {
+		fileCache = make(map[string][]byte, 0)
+	}
+}
+
+func initializeInfoCache() {
+	if infoCache == nil {
+		infoCache = make(map[string]interface{}, 0)
+	}
+}
+
+func DisableFileCache() {
+	fileCacheEnable = false
+}
+
+func DisableInfoCache() {
+	infoCacheEnable = false
+}
+
+func RemoveFromFileCache(fileurl string) {
+	if !fileCacheEnable {
+		return
+	}
+	initializeFileCache()
+	delete(fileCache, fileurl)
+}
+
+func RemoveFromInfoCache(filename string) {
+	if !infoCacheEnable {
+		return
+	}
+	initializeInfoCache()
+	delete(infoCache, filename)
+}
+
+// FetchFile gets a specified file from the local filesystem or a remote location.
+func FetchFile(fileurl string) ([]byte, error) {
+	var bytes []byte
+	initializeFileCache()
+	if fileCacheEnable {
+		bytes, ok := fileCache[fileurl]
+		if ok {
+			if verboseReader {
+				log.Printf("Cache hit %s", fileurl)
+			}
+			return bytes, nil
+		}
+		if verboseReader {
+			log.Printf("Fetching %s", fileurl)
+		}
+	}
+	response, err := http.Get(fileurl)
+	if err != nil {
+		return nil, err
+	}
+	defer response.Body.Close()
+	if response.StatusCode != 200 {
+		return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status))
+	}
+	bytes, err = ioutil.ReadAll(response.Body)
+	if fileCacheEnable && err == nil {
+		fileCache[fileurl] = bytes
+	}
+	return bytes, err
+}
+
+// ReadBytesForFile reads the bytes of a file.
+func ReadBytesForFile(filename string) ([]byte, error) {
+	// is the filename a url?
+	fileurl, _ := url.Parse(filename)
+	if fileurl.Scheme != "" {
+		// yes, fetch it
+		bytes, err := FetchFile(filename)
+		if err != nil {
+			return nil, err
+		}
+		return bytes, nil
+	}
+	// no, it's a local filename
+	bytes, err := ioutil.ReadFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	return bytes, nil
+}
+
+// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice.
+func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) {
+	initializeInfoCache()
+	if infoCacheEnable {
+		cachedInfo, ok := infoCache[filename]
+		if ok {
+			if verboseReader {
+				log.Printf("Cache hit info for file %s", filename)
+			}
+			return cachedInfo, nil
+		}
+		if verboseReader {
+			log.Printf("Reading info for file %s", filename)
+		}
+	}
+	var info yaml.MapSlice
+	err := yaml.Unmarshal(bytes, &info)
+	if err != nil {
+		return nil, err
+	}
+	if infoCacheEnable && len(filename) > 0 {
+		infoCache[filename] = info
+	}
+	return info, nil
+}
+
+// ReadInfoForRef reads a file and returns the fragment needed to resolve a $ref.
+func ReadInfoForRef(basefile string, ref string) (interface{}, error) {
+	initializeInfoCache()
+	if infoCacheEnable {
+		info, ok := infoCache[ref]
+		if ok {
+			if verboseReader {
+				log.Printf("Cache hit for ref %s#%s", basefile, ref)
+			}
+			return info, nil
+		}
+		if verboseReader {
+			log.Printf("Reading info for ref %s#%s", basefile, ref)
+		}
+	}
+	count = count + 1
+	basedir, _ := filepath.Split(basefile)
+	parts := strings.Split(ref, "#")
+	var filename string
+	if parts[0] != "" {
+		filename = basedir + parts[0]
+	} else {
+		filename = basefile
+	}
+	bytes, err := ReadBytesForFile(filename)
+	if err != nil {
+		return nil, err
+	}
+	info, err := ReadInfoFromBytes(filename, bytes)
+	if err != nil {
+		log.Printf("File error: %v\n", err)
+	} else {
+		if len(parts) > 1 {
+			path := strings.Split(parts[1], "/")
+			for i, key := range path {
+				if i > 0 {
+					m, ok := info.(yaml.MapSlice)
+					if ok {
+						found := false
+						for _, section := range m {
+							if section.Key == key {
+								info = section.Value
+								found = true
+							}
+						}
+						if !found {
+							infoCache[ref] = nil
+							return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref))
+						}
+					}
+				}
+			}
+		}
+	}
+	if infoCacheEnable {
+		infoCache[ref] = info
+	}
+	return info, nil
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
new file mode 100755
index 0000000..68d02a0
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
@@ -0,0 +1,5 @@
+go get github.com/golang/protobuf/protoc-gen-go
+
+protoc \
+--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. *.proto 
+
diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md
new file mode 100644
index 0000000..ff1c2eb
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/README.md
@@ -0,0 +1,5 @@
+# Extensions
+
+This directory contains support code for building Gnostic extensions and associated examples.
+
+Extensions are used to compile vendor or specification extensions into protocol buffer structures.
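An extension handler, as described by extension.proto and extensions.go below, reads an ExtensionHandlerRequest from stdin and writes an ExtensionHandlerResponse to stdout. A minimal sketch of such a handler follows; the extension name "x-sample" and the reuse of Version as the returned message are placeholder assumptions for illustration only.

```go
package main

import (
	"github.com/golang/protobuf/proto"
	openapiextension_v1 "github.com/googleapis/gnostic/extensions"
)

func main() {
	// ProcessExtension (defined in extensions/extensions.go, vendored below)
	// reads an ExtensionHandlerRequest from stdin, calls this callback, and
	// writes an ExtensionHandlerResponse to stdout.
	openapiextension_v1.ProcessExtension(func(name string, yamlInput string) (bool, proto.Message, error) {
		// "x-sample" is a hypothetical extension name; a real handler would
		// unmarshal yamlInput into the generated message type for that
		// extension.
		if name != "x-sample" {
			return false, nil, nil
		}
		// Version is reused here only as a stand-in proto.Message value.
		return true, &openapiextension_v1.Version{Major: 0, Minor: 1}, nil
	})
}
```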
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
new file mode 100644
index 0000000..e692789
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go
@@ -0,0 +1,294 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: extension.proto
+
+package openapiextension_v1
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of OpenAPI compiler.
+type Version struct {
+	Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"`
+	Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"`
+	Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"`
+	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+	// be empty for mainline stable releases.
+	Suffix               string   `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Version) Reset()         { *m = Version{} }
+func (m *Version) String() string { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage()    {}
+func (*Version) Descriptor() ([]byte, []int) {
+	return fileDescriptor_extension_d25f09c742c58c90, []int{0}
+}
+func (m *Version) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Version.Unmarshal(m, b)
+}
+func (m *Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+}
+func (dst *Version) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Version.Merge(dst, src)
+}
+func (m *Version) XXX_Size() int {
+	return xxx_messageInfo_Version.Size(m)
+}
+func (m *Version) XXX_DiscardUnknown() {
+	xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func (m *Version) GetMajor() int32 {
+	if m != nil {
+		return m.Major
+	}
+	return 0
+}
+
+func (m *Version) GetMinor() int32 {
+	if m != nil {
+		return m.Minor
+	}
+	return 0
+}
+
+func (m *Version) GetPatch() int32 {
+	if m != nil {
+		return m.Patch
+	}
+	return 0
+}
+
+func (m *Version) GetSuffix() string {
+	if m != nil {
+		return m.Suffix
+	}
+	return ""
+}
+
+// An encoded Request is written to the ExtensionHandler's stdin.
+type ExtensionHandlerRequest struct {
+	// The OpenAPI descriptions that were explicitly listed on the command line.
+	// The specifications will appear in the order they are specified to gnostic.
+	Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"`
+	// The version number of openapi compiler.
+	CompilerVersion      *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ExtensionHandlerRequest) Reset()         { *m = ExtensionHandlerRequest{} }
+func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) }
+func (*ExtensionHandlerRequest) ProtoMessage()    {}
+func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_extension_d25f09c742c58c90, []int{1}
+}
+func (m *ExtensionHandlerRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionHandlerRequest.Unmarshal(m, b)
+}
+func (m *ExtensionHandlerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionHandlerRequest.Marshal(b, m, deterministic)
+}
+func (dst *ExtensionHandlerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionHandlerRequest.Merge(dst, src)
+}
+func (m *ExtensionHandlerRequest) XXX_Size() int {
+	return xxx_messageInfo_ExtensionHandlerRequest.Size(m)
+}
+func (m *ExtensionHandlerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionHandlerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionHandlerRequest proto.InternalMessageInfo
+
+func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper {
+	if m != nil {
+		return m.Wrapper
+	}
+	return nil
+}
+
+func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version {
+	if m != nil {
+		return m.CompilerVersion
+	}
+	return nil
+}
+
+// The extension writes an encoded ExtensionHandlerResponse to stdout.
+type ExtensionHandlerResponse struct {
+	// true if the extension is handled by the extension handler; false otherwise
+	Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"`
+	// Error message.  If non-empty, the extension handling failed.
+	// The extension handler process should exit with status code zero
+	// even if it reports an error in this way.
+	//
+	// This should be used to indicate errors which prevent the extension from
+	// operating as intended.  Errors which indicate a problem in gnostic
+	// itself -- such as the input Document being unparseable -- should be
+	// reported by writing a message to stderr and exiting with a non-zero
+	// status code.
+	Error []string `protobuf:"bytes,2,rep,name=error,proto3" json:"error,omitempty"`
+	// text output
+	Value                *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ExtensionHandlerResponse) Reset()         { *m = ExtensionHandlerResponse{} }
+func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) }
+func (*ExtensionHandlerResponse) ProtoMessage()    {}
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_extension_d25f09c742c58c90, []int{2}
+}
+func (m *ExtensionHandlerResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionHandlerResponse.Unmarshal(m, b)
+}
+func (m *ExtensionHandlerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionHandlerResponse.Marshal(b, m, deterministic)
+}
+func (dst *ExtensionHandlerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionHandlerResponse.Merge(dst, src)
+}
+func (m *ExtensionHandlerResponse) XXX_Size() int {
+	return xxx_messageInfo_ExtensionHandlerResponse.Size(m)
+}
+func (m *ExtensionHandlerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionHandlerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionHandlerResponse proto.InternalMessageInfo
+
+func (m *ExtensionHandlerResponse) GetHandled() bool {
+	if m != nil {
+		return m.Handled
+	}
+	return false
+}
+
+func (m *ExtensionHandlerResponse) GetError() []string {
+	if m != nil {
+		return m.Error
+	}
+	return nil
+}
+
+func (m *ExtensionHandlerResponse) GetValue() *any.Any {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+type Wrapper struct {
+	// version of the OpenAPI specification in which this extension was written.
+	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	// Name of the extension
+	ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"`
+	// Must be a valid yaml for the proto
+	Yaml                 string   `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Wrapper) Reset()         { *m = Wrapper{} }
+func (m *Wrapper) String() string { return proto.CompactTextString(m) }
+func (*Wrapper) ProtoMessage()    {}
+func (*Wrapper) Descriptor() ([]byte, []int) {
+	return fileDescriptor_extension_d25f09c742c58c90, []int{3}
+}
+func (m *Wrapper) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Wrapper.Unmarshal(m, b)
+}
+func (m *Wrapper) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Wrapper.Marshal(b, m, deterministic)
+}
+func (dst *Wrapper) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Wrapper.Merge(dst, src)
+}
+func (m *Wrapper) XXX_Size() int {
+	return xxx_messageInfo_Wrapper.Size(m)
+}
+func (m *Wrapper) XXX_DiscardUnknown() {
+	xxx_messageInfo_Wrapper.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Wrapper proto.InternalMessageInfo
+
+func (m *Wrapper) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Wrapper) GetExtensionName() string {
+	if m != nil {
+		return m.ExtensionName
+	}
+	return ""
+}
+
+func (m *Wrapper) GetYaml() string {
+	if m != nil {
+		return m.Yaml
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Version)(nil), "openapiextension.v1.Version")
+	proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest")
+	proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse")
+	proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper")
+}
+
+func init() { proto.RegisterFile("extension.proto", fileDescriptor_extension_d25f09c742c58c90) }
+
+var fileDescriptor_extension_d25f09c742c58c90 = []byte{
+	// 357 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40,
+	0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64,
+	0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46,
+	0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7,
+	0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22,
+	0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11,
+	0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18,
+	0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8,
+	0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c,
+	0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69,
+	0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef,
+	0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3,
+	0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87,
+	0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44,
+	0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80,
+	0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4,
+	0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54,
+	0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c,
+	0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e,
+	0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2,
+	0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa,
+	0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b,
+	0x80, 0x51, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
new file mode 100644
index 0000000..04856f9
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto
@@ -0,0 +1,93 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+import "google/protobuf/any.proto";
+package openapiextension.v1;
+
+// This option lets the proto compiler generate Java code inside the package
+// name (see below) instead of inside an outer class. It creates a simpler
+// developer experience by reducing one level of name nesting and being
+// consistent with most programming languages that don't support outer classes.
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "OpenAPIExtensionV1";
+
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.gnostic.v1";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+//
+option objc_class_prefix = "OAE"; // "OpenAPI Extension"
+
+// The version number of OpenAPI compiler.
+message Version {
+  int32 major = 1;
+  int32 minor = 2;
+  int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  string suffix = 4;
+}
+
+// An encoded Request is written to the ExtensionHandler's stdin.
+message ExtensionHandlerRequest {
+
+  // The OpenAPI descriptions that were explicitly listed on the command line.
+  // The specifications will appear in the order they are specified to gnostic.
+  Wrapper wrapper = 1;
+
+  // The version number of openapi compiler.
+  Version compiler_version = 3;
+}
+
+// The extension writes an encoded ExtensionHandlerResponse to stdout.
+message ExtensionHandlerResponse {
+
+  // true if the extension is handled by the extension handler; false otherwise
+  bool handled = 1;
+
+  // Error message.  If non-empty, the extension handling failed.
+  // The extension handler process should exit with status code zero
+  // even if it reports an error in this way.
+  //
+  // This should be used to indicate errors which prevent the extension from
+  // operating as intended.  Errors which indicate a problem in gnostic
+  // itself -- such as the input Document being unparseable -- should be
+  // reported by writing a message to stderr and exiting with a non-zero
+  // status code.
+  repeated string error = 2;
+
+  // text output
+  google.protobuf.Any value = 3;
+}
+
+message Wrapper {
+  // version of the OpenAPI specification in which this extension was written.
+  string version = 1;
+
+  // Name of the extension
+  string extension_name = 2;
+
+  // Must be a valid yaml for the proto
+  string yaml = 3;
+}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
new file mode 100644
index 0000000..94a8e62
--- /dev/null
+++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go
@@ -0,0 +1,82 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package openapiextension_v1
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes"
+)
+
+type documentHandler func(version string, extensionName string, document string)
+type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error)
+
+func forInputYamlFromOpenapic(handler documentHandler) {
+	data, err := ioutil.ReadAll(os.Stdin)
+	if err != nil {
+		fmt.Println("File error:", err.Error())
+		os.Exit(1)
+	}
+	if len(data) == 0 {
+		fmt.Println("No input data.")
+		os.Exit(1)
+	}
+	request := &ExtensionHandlerRequest{}
+	err = proto.Unmarshal(data, request)
+	if err != nil {
+		fmt.Println("Input error:", err.Error())
+		os.Exit(1)
+	}
+	handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml)
+}
+
+// ProcessExtension calls the handler for a specified extension.
+func ProcessExtension(handleExtension extensionHandler) {
+	response := &ExtensionHandlerResponse{}
+	forInputYamlFromOpenapic(
+		func(version string, extensionName string, yamlInput string) {
+			var newObject proto.Message
+			var err error
+
+			handled, newObject, err := handleExtension(extensionName, yamlInput)
+			if !handled {
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+
+			// If we reach here, then the extension is handled
+			response.Handled = true
+			if err != nil {
+				response.Error = append(response.Error, err.Error())
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+			response.Value, err = ptypes.MarshalAny(newObject)
+			if err != nil {
+				response.Error = append(response.Error, err.Error())
+				responseBytes, _ := proto.Marshal(response)
+				os.Stdout.Write(responseBytes)
+				os.Exit(0)
+			}
+		})
+
+	responseBytes, _ := proto.Marshal(response)
+	os.Stdout.Write(responseBytes)
+}
diff --git a/vendor/github.com/imdario/mergo/.gitignore b/vendor/github.com/imdario/mergo/.gitignore
new file mode 100644
index 0000000..529c341
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.gitignore
@@ -0,0 +1,33 @@
+#### joe made this: http://goel.io/joe
+
+#### go ####
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+#### vim ####
+# Swap
+[._]*.s[a-v][a-z]
+[._]*.sw[a-p]
+[._]s[a-v][a-z]
+[._]sw[a-p]
+
+# Session
+Session.vim
+
+# Temporary
+.netrwhist
+*~
+# Auto-generated tag files
+tags
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
new file mode 100644
index 0000000..b13a50e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+install:
+  - go get -t
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/mattn/goveralls
+script:
+  - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..469b449
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE
new file mode 100644
index 0000000..6866802
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2013 Dario Castañé. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
new file mode 100644
index 0000000..02fc81e
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -0,0 +1,238 @@
+# Mergo
+
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
+
+Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
+
+## Status
+
+It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
+
+[![GoDoc][3]][4]
+[![GoCard][5]][6]
+[![Build Status][1]][2]
+[![Coverage Status][7]][8]
+[![Sourcegraph][9]][10]
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield)
+
+[1]: https://travis-ci.org/imdario/mergo.png
+[2]: https://travis-ci.org/imdario/mergo
+[3]: https://godoc.org/github.com/imdario/mergo?status.svg
+[4]: https://godoc.org/github.com/imdario/mergo
+[5]: https://goreportcard.com/badge/imdario/mergo
+[6]: https://goreportcard.com/report/github.com/imdario/mergo
+[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
+[8]: https://coveralls.io/github/imdario/mergo?branch=master
+[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
+[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
+
+### Latest release
+
+[Release v0.3.7](https://github.com/imdario/mergo/releases/tag/v0.3.7).
+
+### Important note
+
+Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()` and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
+
+If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+
+### Donations
+
+If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+
+<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
+[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
+[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
+<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
+
+### Mergo in the wild
+
+- [moby/moby](https://github.com/moby/moby)
+- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
+- [vmware/dispatch](https://github.com/vmware/dispatch)
+- [Shopify/themekit](https://github.com/Shopify/themekit)
+- [imdario/zas](https://github.com/imdario/zas)
+- [matcornic/hermes](https://github.com/matcornic/hermes)
+- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
+- [kataras/iris](https://github.com/kataras/iris)
+- [michaelsauter/crane](https://github.com/michaelsauter/crane)
+- [go-task/task](https://github.com/go-task/task)
+- [sensu/uchiwa](https://github.com/sensu/uchiwa)
+- [ory/hydra](https://github.com/ory/hydra)
+- [sisatech/vcli](https://github.com/sisatech/vcli)
+- [dairycart/dairycart](https://github.com/dairycart/dairycart)
+- [projectcalico/felix](https://github.com/projectcalico/felix)
+- [resin-os/balena](https://github.com/resin-os/balena)
+- [go-kivik/kivik](https://github.com/go-kivik/kivik)
+- [Telefonica/govice](https://github.com/Telefonica/govice)
+- [supergiant/supergiant](https://github.com/supergiant/supergiant)
+- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
+- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
+- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
+- [EagerIO/Stout](https://github.com/EagerIO/Stout)
+- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
+- [russross/canvasassignments](https://github.com/russross/canvasassignments)
+- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
+- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
+- [divshot/gitling](https://github.com/divshot/gitling)
+- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
+- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
+- [elwinar/rambler](https://github.com/elwinar/rambler)
+- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
+- [jfbus/impressionist](https://github.com/jfbus/impressionist)
+- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
+- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
+- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
+- [thoas/picfit](https://github.com/thoas/picfit)
+- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
+- [jnuthong/item_search](https://github.com/jnuthong/item_search)
+- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+
+## Installation
+
+    go get github.com/imdario/mergo
+
+    // use in your .go code
+    import (
+        "github.com/imdario/mergo"
+    )
+
+## Usage
+
+You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields, but it will merge any exported field recursively. It won't merge empty struct values, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Maps are also merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
+
+```go
+if err := mergo.Merge(&dst, src); err != nil {
+    // ...
+}
+```
+
+Also, you can merge overwriting values using the option `WithOverride`.
+
+```go
+if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+    // ...
+}
+```
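+
+Merge also accepts other options (see `merge.go`), such as `WithAppendSlice`, which appends src slices to dst slices instead of replacing them. A minimal sketch, assuming a hypothetical `Bag` struct (not part of Mergo):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+// Bag is a hypothetical type used only for this sketch.
+type Bag struct {
+	Items []string
+}
+
+func main() {
+	dst := Bag{Items: []string{"a"}}
+	src := Bag{Items: []string{"b", "c"}}
+	// With WithAppendSlice, the src slice is appended to the dst slice.
+	if err := mergo.Merge(&dst, src, mergo.WithAppendSlice); err != nil {
+		fmt.Println(err)
+		return
+	}
+	fmt.Println(dst.Items)
+	// [a b c]
+}
+```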
+
+Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
+
+```go
+if err := mergo.Map(&dst, srcMap); err != nil {
+    // ...
+}
+```
+
+Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will just be assigned as values.
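+
+For instance (a minimal sketch; the `Inner` and `Outer` types below are hypothetical and only illustrate the point), the nested struct ends up stored in the map as a struct value:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+// Inner and Outer are hypothetical types used only for this sketch.
+type Inner struct {
+	Value string
+}
+
+type Outer struct {
+	Name  string
+	Inner Inner
+}
+
+func main() {
+	src := Outer{Name: "example", Inner: Inner{Value: "nested"}}
+	dst := map[string]interface{}{}
+	if err := mergo.Map(&dst, src); err != nil {
+		fmt.Println(err)
+		return
+	}
+	// Keys are the lower-cased field names; the nested struct is kept as-is.
+	fmt.Printf("%#v\n", dst["inner"]) // prints main.Inner{Value:"nested"}, not a map
+}
+```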
+
+More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
+
+### Nice example
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+)
+
+type Foo struct {
+	A string
+	B int64
+}
+
+func main() {
+	src := Foo{
+		A: "one",
+		B: 2,
+	}
+	dest := Foo{
+		A: "two",
+	}
+	mergo.Merge(&dest, src)
+	fmt.Println(dest)
+	// Will print
+	// {two 2}
+}
+```
+
+Note: if tests are failing due to a missing package, please execute:
+
+    go get gopkg.in/yaml.v2
+
+### Transformers
+
+Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; Mergo never treats it as an empty value, but its `IsZero` method can return true because all of its fields hold zero values. How can we merge a non-zero `time.Time`?
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/imdario/mergo"
+	"reflect"
+	"time"
+)
+
+type timeTransformer struct {
+}
+
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+	if typ == reflect.TypeOf(time.Time{}) {
+		return func(dst, src reflect.Value) error {
+			if dst.CanSet() {
+				isZero := dst.MethodByName("IsZero")
+				result := isZero.Call([]reflect.Value{})
+				if result[0].Bool() {
+					dst.Set(src)
+				}
+			}
+			return nil
+		}
+	}
+	return nil
+}
+
+type Snapshot struct {
+	Time time.Time
+	// ...
+}
+
+func main() {
+	src := Snapshot{time.Now()}
+	dest := Snapshot{}
+	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+	fmt.Println(dest)
+	// Will print
+	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
+}
+```
+
+
+## Contact me
+
+If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
+
+## About
+
+Written by [Dario Castañé](http://dario.im).
+
+## Top Contributors
+
+[![0](https://sourcerer.io/fame/imdario/imdario/mergo/images/0)](https://sourcerer.io/fame/imdario/imdario/mergo/links/0)
+[![1](https://sourcerer.io/fame/imdario/imdario/mergo/images/1)](https://sourcerer.io/fame/imdario/imdario/mergo/links/1)
+[![2](https://sourcerer.io/fame/imdario/imdario/mergo/images/2)](https://sourcerer.io/fame/imdario/imdario/mergo/links/2)
+[![3](https://sourcerer.io/fame/imdario/imdario/mergo/images/3)](https://sourcerer.io/fame/imdario/imdario/mergo/links/3)
+[![4](https://sourcerer.io/fame/imdario/imdario/mergo/images/4)](https://sourcerer.io/fame/imdario/imdario/mergo/links/4)
+[![5](https://sourcerer.io/fame/imdario/imdario/mergo/images/5)](https://sourcerer.io/fame/imdario/imdario/mergo/links/5)
+[![6](https://sourcerer.io/fame/imdario/imdario/mergo/images/6)](https://sourcerer.io/fame/imdario/imdario/mergo/links/6)
+[![7](https://sourcerer.io/fame/imdario/imdario/mergo/images/7)](https://sourcerer.io/fame/imdario/imdario/mergo/links/7)
+
+
+## License
+
+[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
new file mode 100644
index 0000000..6e9aa7b
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -0,0 +1,44 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+
+Mergo won't merge unexported (private) fields, but it will merge any exported field recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Usage
+
+From my own work-in-progress project:
+
+	type networkConfig struct {
+		Protocol string
+		Address string
+		ServerType string `json:"server_type"`
+		Port uint16
+	}
+
+	type FssnConfig struct {
+		Network networkConfig
+	}
+
+	var fssnDefault = FssnConfig {
+		networkConfig {
+			"tcp",
+			"127.0.0.1",
+			"http",
+			31560,
+		},
+	}
+
+	// Inside a function [...]
+
+	if err := mergo.Merge(&config, fssnDefault); err != nil {
+		log.Fatal(err)
+	}
+
+	// More code [...]
+
+*/
+package mergo
diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go
new file mode 100644
index 0000000..3f5afa8
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/map.go
@@ -0,0 +1,175 @@
+// Copyright 2014 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+	"unicode"
+	"unicode/utf8"
+)
+
+func changeInitialCase(s string, mapper func(rune) rune) string {
+	if s == "" {
+		return s
+	}
+	r, n := utf8.DecodeRuneInString(s)
+	return string(mapper(r)) + s[n:]
+}
+
+func isExported(field reflect.StructField) bool {
+	r, _ := utf8.DecodeRuneInString(field.Name)
+	return r >= 'A' && r <= 'Z'
+}
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	zeroValue := reflect.Value{}
+	switch dst.Kind() {
+	case reflect.Map:
+		dstMap := dst.Interface().(map[string]interface{})
+		for i, n := 0, src.NumField(); i < n; i++ {
+			srcType := src.Type()
+			field := srcType.Field(i)
+			if !isExported(field) {
+				continue
+			}
+			fieldName := field.Name
+			fieldName = changeInitialCase(fieldName, unicode.ToLower)
+			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
+				dstMap[fieldName] = src.Field(i).Interface()
+			}
+		}
+	case reflect.Ptr:
+		if dst.IsNil() {
+			v := reflect.New(dst.Type().Elem())
+			dst.Set(v)
+		}
+		dst = dst.Elem()
+		fallthrough
+	case reflect.Struct:
+		srcMap := src.Interface().(map[string]interface{})
+		for key := range srcMap {
+			config.overwriteWithEmptyValue = true
+			srcValue := srcMap[key]
+			fieldName := changeInitialCase(key, unicode.ToUpper)
+			dstElement := dst.FieldByName(fieldName)
+			if dstElement == zeroValue {
+				// We discard it because the field doesn't exist.
+				continue
+			}
+			srcElement := reflect.ValueOf(srcValue)
+			dstKind := dstElement.Kind()
+			srcKind := srcElement.Kind()
+			if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
+				srcElement = srcElement.Elem()
+				srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
+			} else if dstKind == reflect.Ptr {
+				// Can this work? I guess it can't.
+				if srcKind != reflect.Ptr && srcElement.CanAddr() {
+					srcPtr := srcElement.Addr()
+					srcElement = reflect.ValueOf(srcPtr)
+					srcKind = reflect.Ptr
+				}
+			}
+
+			if !srcElement.IsValid() {
+				continue
+			}
+			if srcKind == dstKind {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
+				if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else if srcKind == reflect.Map {
+				if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
+			}
+		}
+	}
+	return
+}
+
+// Map sets fields' values in dst from src.
+// src can be a map with string keys or a struct. dst must be the opposite:
+// if src is a map, dst must be a valid pointer to a struct. If src is a struct,
+// dst must be a map[string]interface{}.
+// It won't merge unexported (private) fields and will merge any exported
+// field recursively.
+// If dst is a map, keys will be src fields' names in lower camel case.
+// Keys in src that don't match a field in dst will be skipped. This
+// doesn't apply if dst is a map.
+// This is a separate method from Merge because it is cleaner and it keeps sane
+// semantics: merging equal types, mapping different (restricted) types.
+func Map(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, opts...)
+}
+
+// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: Use Map(…) with WithOverride
+func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return _map(dst, src, append(opts, WithOverride)...)
+}
+
+func _map(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	// To be friction-less, we redirect equal-type arguments
+	// to deepMerge. Only because arguments can be anything.
+	if vSrc.Kind() == vDst.Kind() {
+		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+	}
+	switch vSrc.Kind() {
+	case reflect.Struct:
+		if vDst.Kind() != reflect.Map {
+			return ErrExpectedMapAsDestination
+		}
+	case reflect.Map:
+		if vDst.Kind() != reflect.Struct {
+			return ErrExpectedStructAsDestination
+		}
+	default:
+		return ErrNotSupported
+	}
+	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go
new file mode 100644
index 0000000..f8de6c5
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/merge.go
@@ -0,0 +1,255 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func hasExportedField(dst reflect.Value) (exported bool) {
+	for i, n := 0, dst.NumField(); i < n; i++ {
+		field := dst.Type().Field(i)
+		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
+			exported = exported || hasExportedField(dst.Field(i))
+		} else {
+			exported = exported || len(field.PkgPath) == 0
+		}
+	}
+	return
+}
+
+type Config struct {
+	Overwrite               bool
+	AppendSlice             bool
+	Transformers            Transformers
+	overwriteWithEmptyValue bool
+}
+
+type Transformers interface {
+	Transformer(reflect.Type) func(dst, src reflect.Value) error
+}
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
+	overwrite := config.Overwrite
+	overwriteWithEmptySrc := config.overwriteWithEmptyValue
+	config.overwriteWithEmptyValue = false
+
+	if !src.IsValid() {
+		return
+	}
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+
+	if config.Transformers != nil && !isEmptyValue(dst) {
+		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
+			err = fn(dst, src)
+			return
+		}
+	}
+
+	switch dst.Kind() {
+	case reflect.Struct:
+		if hasExportedField(dst) {
+			for i, n := 0, dst.NumField(); i < n; i++ {
+				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
+					return
+				}
+			}
+		} else {
+			if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		}
+	case reflect.Map:
+		if dst.IsNil() && !src.IsNil() {
+			dst.Set(reflect.MakeMap(dst.Type()))
+		}
+		for _, key := range src.MapKeys() {
+			srcElement := src.MapIndex(key)
+			if !srcElement.IsValid() {
+				continue
+			}
+			dstElement := dst.MapIndex(key)
+			switch srcElement.Kind() {
+			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
+				if srcElement.IsNil() {
+					continue
+				}
+				fallthrough
+			default:
+				if !srcElement.CanInterface() {
+					continue
+				}
+				switch reflect.TypeOf(srcElement.Interface()).Kind() {
+				case reflect.Struct:
+					fallthrough
+				case reflect.Ptr:
+					fallthrough
+				case reflect.Map:
+					srcMapElm := srcElement
+					dstMapElm := dstElement
+					if srcMapElm.CanInterface() {
+						srcMapElm = reflect.ValueOf(srcMapElm.Interface())
+						if dstMapElm.IsValid() {
+							dstMapElm = reflect.ValueOf(dstMapElm.Interface())
+						}
+					}
+					if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
+						return
+					}
+				case reflect.Slice:
+					srcSlice := reflect.ValueOf(srcElement.Interface())
+
+					var dstSlice reflect.Value
+					if !dstElement.IsValid() || dstElement.IsNil() {
+						dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
+					} else {
+						dstSlice = reflect.ValueOf(dstElement.Interface())
+					}
+
+					if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+						dstSlice = srcSlice
+					} else if config.AppendSlice {
+						if srcSlice.Type() != dstSlice.Type() {
+							return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
+						}
+						dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
+					}
+					dst.SetMapIndex(key, dstSlice)
+				}
+			}
+			if dstElement.IsValid() && !isEmptyValue(dstElement) && (reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) {
+				continue
+			}
+
+			if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
+				if dst.IsNil() {
+					dst.Set(reflect.MakeMap(dst.Type()))
+				}
+				dst.SetMapIndex(key, srcElement)
+			}
+		}
+	case reflect.Slice:
+		if !dst.CanSet() {
+			break
+		}
+		if (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
+			dst.Set(src)
+		} else if config.AppendSlice {
+			if src.Type() != dst.Type() {
+				return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
+			}
+			dst.Set(reflect.AppendSlice(dst, src))
+		}
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Interface:
+		if src.IsNil() {
+			break
+		}
+		if src.Kind() != reflect.Interface {
+			if dst.IsNil() || overwrite {
+				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+					dst.Set(src)
+				}
+			} else if src.Kind() == reflect.Ptr {
+				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+					return
+				}
+			} else if dst.Elem().Type() == src.Type() {
+				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
+					return
+				}
+			} else {
+				return ErrDifferentArgumentsTypes
+			}
+			break
+		}
+		if dst.IsNil() || overwrite {
+			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
+				dst.Set(src)
+			}
+		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
+			return
+		}
+	default:
+		if dst.CanSet() && (!isEmptyValue(src) || overwriteWithEmptySrc) && (overwrite || isEmptyValue(dst)) {
+			dst.Set(src)
+		}
+	}
+	return
+}
+
+// Merge will fill any empty value-type attributes on the dst struct using corresponding
+// src attributes if they themselves are not empty. dst and src must be valid same-type structs
+// and dst must be a pointer to a struct.
+// It won't merge unexported (private) fields and will merge any exported field recursively.
+func Merge(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, opts...)
+}
+
+// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
+// non-empty src attribute values.
+// Deprecated: use Merge(…) with WithOverride
+func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
+	return merge(dst, src, append(opts, WithOverride)...)
+}
+
+// WithTransformers adds transformers to merge, allowing you to customize the merging of some types.
+func WithTransformers(transformers Transformers) func(*Config) {
+	return func(config *Config) {
+		config.Transformers = transformers
+	}
+}
+
+// WithOverride will make merge override non-empty dst attributes with non-empty src attribute values.
+func WithOverride(config *Config) {
+	config.Overwrite = true
+}
+
+// WithAppendSlice will make merge append slices instead of overwriting them.
+func WithAppendSlice(config *Config) {
+	config.AppendSlice = true
+}
+
+func merge(dst, src interface{}, opts ...func(*Config)) error {
+	var (
+		vDst, vSrc reflect.Value
+		err        error
+	)
+
+	config := &Config{}
+
+	for _, opt := range opts {
+		opt(config)
+	}
+
+	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
+		return err
+	}
+	if vDst.Type() != vSrc.Type() {
+		return ErrDifferentArgumentsTypes
+	}
+	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
+}
diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go
new file mode 100644
index 0000000..a82fea2
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/mergo.go
@@ -0,0 +1,97 @@
+// Copyright 2013 Dario Castañé. All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Based on src/pkg/reflect/deepequal.go from official
+// golang's stdlib.
+
+package mergo
+
+import (
+	"errors"
+	"reflect"
+)
+
+// Errors reported by Mergo when it finds invalid arguments.
+var (
+	ErrNilArguments                = errors.New("src and dst must not be nil")
+	ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
+	ErrNotSupported                = errors.New("only structs and maps are supported")
+	ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
+	ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
+)
+
+// During deepMerge, we must keep track of checks that are
+// in progress. The comparison algorithm assumes that all
+// checks in progress are true when it re-encounters them.
+// Visited entries are stored in a map indexed by 17 * the address.
+type visit struct {
+	ptr  uintptr
+	typ  reflect.Type
+	next *visit
+}
+
+// From src/pkg/encoding/json/encode.go.
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return true
+		}
+		return isEmptyValue(v.Elem())
+	case reflect.Func:
+		return v.IsNil()
+	case reflect.Invalid:
+		return true
+	}
+	return false
+}
+
+func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
+	if dst == nil || src == nil {
+		err = ErrNilArguments
+		return
+	}
+	vDst = reflect.ValueOf(dst).Elem()
+	if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map {
+		err = ErrNotSupported
+		return
+	}
+	vSrc = reflect.ValueOf(src)
+	// We check if vSrc is a pointer to dereference it.
+	if vSrc.Kind() == reflect.Ptr {
+		vSrc = vSrc.Elem()
+	}
+	return
+}
+
+// Recursively traverses both values, assigning src's field values to dst.
+// The map argument tracks comparisons that have already been seen, which allows
+// short circuiting on recursive types.
+func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
+	if dst.CanAddr() {
+		addr := dst.UnsafeAddr()
+		h := 17 * addr
+		seen := visited[h]
+		typ := dst.Type()
+		for p := seen; p != nil; p = p.next {
+			if p.ptr == addr && p.typ == typ {
+				return nil
+			}
+		}
+		// Remember, remember...
+		visited[h] = &visit{addr, typ, seen}
+	}
+	return // TODO refactor
+}
diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml
new file mode 100644
index 0000000..2f1ad00
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/testdata/license.yml
@@ -0,0 +1,4 @@
+import: ../../../../fossene/db/schema/thing.yml
+fields:
+    site: string
+    author: root
diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml
new file mode 100644
index 0000000..0f0728d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/.travis.yml
@@ -0,0 +1,44 @@
+language: go
+
+os:
+  - linux
+  - osx
+
+go:
+  - 1.x
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+
+install:
+  # go-flags
+  - go get -d -v ./...
+  - go build -v ./...
+
+  # linting
+  - go get github.com/golang/lint/golint
+
+  # code coverage
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/ginkgo/ginkgo
+  - go get github.com/modocache/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
+
+script:
+  # go-flags
+  - $(exit $(gofmt -l . | wc -l))
+  - go test -v ./...
+
+  # linting
+  - go tool vet -all=true -v=true . || true
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
+
+  # code coverage
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
+
+env:
+  # coveralls.io
+  secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="
diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE
new file mode 100644
index 0000000..bcca0d5
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012 Jesse van den Kieboom. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+     copyright notice, this list of conditions and the following disclaimer
+     in the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Google Inc. nor the names of its
+     contributors may be used to endorse or promote products derived from
+     this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md
new file mode 100644
index 0000000..3b02394
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/README.md
@@ -0,0 +1,134 @@
+go-flags: a go library for parsing command line arguments
+=========================================================
+
+[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master)
+
+This library provides similar functionality to the builtin flag library of
+Go, but with much more functionality and nicer formatting. From the
+documentation:
+
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go builtin flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+Supported features:
+* Options with short names (-v)
+* Options with long names (--verbose)
+* Options with and without arguments (bool vs. other types)
+* Options with optional arguments and default values
+* Multiple option groups each containing a set of options
+* Generate and print well-formatted help message
+* Passing remaining command line arguments after -- (optional)
+* Ignoring unknown command line options (optional)
+* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+* Supports multiple short options -aux
+* Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+* Supports same option multiple times (can store in slice or last option counts)
+* Supports maps
+* Supports function callbacks
+* Supports namespaces for (nested) option groups
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+```go
+type Options struct {
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+}
+```
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field; e.g. when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Example:
+--------
+```go
+var opts struct {
+	// Slice of bool will append 'true' each time the option
+	// is encountered (can be set multiple times, like -vvv)
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+
+	// Example of automatic marshalling to desired type (uint)
+	Offset uint `long:"offset" description:"Offset"`
+
+	// Example of a callback, called each time the option is found.
+	Call func(string) `short:"c" description:"Call phone number"`
+
+	// Example of a required flag
+	Name string `short:"n" long:"name" description:"A name" required:"true"`
+
+	// Example of a value name
+	File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
+
+	// Example of a pointer
+	Ptr *int `short:"p" description:"A pointer to an integer"`
+
+	// Example of a slice of strings
+	StringSlice []string `short:"s" description:"A slice of strings"`
+
+	// Example of a slice of pointers
+	PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+
+	// Example of a map
+	IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+}
+
+// Callback which will invoke callto:<argument> to call a number.
+// Note that this works only on OS X (and probably only with
+// Skype) but it shows the idea.
+opts.Call = func(num string) {
+	cmd := exec.Command("open", "callto:"+num)
+	cmd.Start()
+	cmd.Process.Release()
+}
+
+// Make some fake arguments to parse.
+args := []string{
+	"-vv",
+	"--offset=5",
+	"-n", "Me",
+	"-p", "3",
+	"-s", "hello",
+	"-s", "world",
+	"--ptrslice", "hello",
+	"--ptrslice", "world",
+	"--intmap", "a:1",
+	"--intmap", "b:5",
+	"arg1",
+	"arg2",
+	"arg3",
+}
+
+// Parse flags from `args'. Note that here we use flags.ParseArgs for
+// the sake of making a working example. Normally, you would simply use
+// flags.Parse(&opts) which uses os.Args
+args, err := flags.ParseArgs(&opts, args)
+
+if err != nil {
+	panic(err)
+}
+
+fmt.Printf("Verbosity: %v\n", opts.Verbose)
+fmt.Printf("Offset: %d\n", opts.Offset)
+fmt.Printf("Name: %s\n", opts.Name)
+fmt.Printf("Ptr: %d\n", *opts.Ptr)
+fmt.Printf("StringSlice: %v\n", opts.StringSlice)
+fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
+fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"])
+fmt.Printf("Remaining args: %s\n", strings.Join(args, " "))
+
+// Output: Verbosity: [true true]
+// Offset: 5
+// Name: Me
+// Ptr: 3
+// StringSlice: [hello world]
+// PtrSlice: [hello world]
+// IntMap: [a:1 b:5]
+// Remaining args: arg1 arg2 arg3
+```
+
+More information can be found in the godocs: <http://godoc.org/github.com/jessevdk/go-flags>
diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go
new file mode 100644
index 0000000..8ec6204
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/arg.go
@@ -0,0 +1,27 @@
+package flags
+
+import (
+	"reflect"
+)
+
+// Arg represents a positional argument on the command line.
+type Arg struct {
+	// The name of the positional argument (used in the help)
+	Name string
+
+	// A description of the positional argument (used in the help)
+	Description string
+
+	// The minimal number of required positional arguments
+	Required int
+
+	// The maximum number of required positional arguments
+	RequiredMaximum int
+
+	value reflect.Value
+	tag   multiTag
+}
+
+func (a *Arg) isRemaining() bool {
+	return a.value.Type().Kind() == reflect.Slice
+}
diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
new file mode 100755
index 0000000..c494f61
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+set -e
+
+echo '# linux arm7'
+GOARM=7 GOARCH=arm GOOS=linux go build
+echo '# linux arm5'
+GOARM=5 GOARCH=arm GOOS=linux go build
+echo '# windows 386'
+GOARCH=386 GOOS=windows go build
+echo '# windows amd64'
+GOARCH=amd64 GOOS=windows go build
+echo '# darwin'
+GOARCH=amd64 GOOS=darwin go build
+echo '# freebsd'
+GOARCH=amd64 GOOS=freebsd go build
diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go
new file mode 100644
index 0000000..3b51875
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/closest.go
@@ -0,0 +1,59 @@
+package flags
+
+func levenshtein(s string, t string) int {
+	if len(s) == 0 {
+		return len(t)
+	}
+
+	if len(t) == 0 {
+		return len(s)
+	}
+
+	dists := make([][]int, len(s)+1)
+	for i := range dists {
+		dists[i] = make([]int, len(t)+1)
+		dists[i][0] = i
+	}
+
+	for j := range t {
+		dists[0][j] = j
+	}
+
+	for i, sc := range s {
+		for j, tc := range t {
+			if sc == tc {
+				dists[i+1][j+1] = dists[i][j]
+			} else {
+				dists[i+1][j+1] = dists[i][j] + 1
+				if dists[i+1][j] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i+1][j] + 1
+				}
+				if dists[i][j+1] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i][j+1] + 1
+				}
+			}
+		}
+	}
+
+	return dists[len(s)][len(t)]
+}
+
+func closestChoice(cmd string, choices []string) (string, int) {
+	if len(choices) == 0 {
+		return "", 0
+	}
+
+	mincmd := -1
+	mindist := -1
+
+	for i, c := range choices {
+		l := levenshtein(cmd, c)
+
+		if mincmd < 0 || l < mindist {
+			mindist = l
+			mincmd = i
+		}
+	}
+
+	return choices[mincmd], mindist
+}
diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go
new file mode 100644
index 0000000..486bacb
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/command.go
@@ -0,0 +1,465 @@
+package flags
+
+import (
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Command represents an application command. Commands can be added to the
+// parser (which itself is a command) and are selected/executed when its name
+// is specified on the command line. The Command type embeds a Group and
+// therefore also carries a set of command specific options.
+type Command struct {
+	// Embedded, see Group for more information
+	*Group
+
+	// The name by which the command can be invoked
+	Name string
+
+	// The active sub command (set by parsing) or nil
+	Active *Command
+
+	// Whether subcommands are optional
+	SubcommandsOptional bool
+
+	// Aliases for the command
+	Aliases []string
+
+	// Whether positional arguments are required
+	ArgsRequired bool
+
+	commands            []*Command
+	hasBuiltinHelpGroup bool
+	args                []*Arg
+}
+
+// Commander is an interface which can be implemented by any command added in
+// the options. When implemented, the Execute method will be called for the last
+// specified (sub)command providing the remaining command line arguments.
+type Commander interface {
+	// Execute will be called for the last active (sub)command. The
+	// args argument contains the remaining command line arguments. The
+	// error that Execute returns will be eventually passed out of the
+	// Parse method of the Parser.
+	Execute(args []string) error
+}
+
+// Usage is an interface which can be implemented to show a custom usage string
+// in the help message shown for a command.
+type Usage interface {
+	// Usage is called for commands to allow customized printing of command
+	// usage in the generated help message.
+	Usage() string
+}
+
+type lookup struct {
+	shortNames map[string]*Option
+	longNames  map[string]*Option
+
+	commands map[string]*Command
+}
+
+// AddCommand adds a new command to the parser with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the command. The provided data can implement the Command and
+// Usage interfaces.
+func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) {
+	cmd := newCommand(command, shortDescription, longDescription, data)
+
+	cmd.parent = c
+
+	if err := cmd.scan(); err != nil {
+		return nil, err
+	}
+
+	c.commands = append(c.commands, cmd)
+	return cmd, nil
+}
+
+// AddGroup adds a new group to the command with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = c
+
+	if err := group.scanType(c.scanSubcommandHandler(group)); err != nil {
+		return nil, err
+	}
+
+	c.groups = append(c.groups, group)
+	return group, nil
+}
+
+// Commands returns a list of subcommands of this command.
+func (c *Command) Commands() []*Command {
+	return c.commands
+}
+
+// Find locates the subcommand with the given name and returns it. If no such
+// command can be found Find will return nil.
+func (c *Command) Find(name string) *Command {
+	for _, cc := range c.commands {
+		if cc.match(name) {
+			return cc
+		}
+	}
+
+	return nil
+}
+
+// FindOptionByLongName finds an option that is part of the command, or any of
+// its parent commands, by matching its long name (including the option
+// namespace).
+func (c *Command) FindOptionByLongName(longName string) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByLongName(longName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// FindOptionByShortName finds an option that is part of the command, or any of
+// its parent commands, by matching its short name.
+func (c *Command) FindOptionByShortName(shortName rune) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByShortName(shortName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// Args returns a list of positional arguments associated with this command.
+func (c *Command) Args() []*Arg {
+	ret := make([]*Arg, len(c.args))
+	copy(ret, c.args)
+
+	return ret
+}
+
+func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command {
+	return &Command{
+		Group: newGroup(shortDescription, longDescription, data),
+		Name:  name,
+	}
+}
+
+func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler {
+	f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+		mtag := newMultiTag(string(sfield.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return true, err
+		}
+
+		positional := mtag.Get("positional-args")
+
+		if len(positional) != 0 {
+			stype := realval.Type()
+
+			for i := 0; i < stype.NumField(); i++ {
+				field := stype.Field(i)
+
+				m := newMultiTag((string(field.Tag)))
+
+				if err := m.Parse(); err != nil {
+					return true, err
+				}
+
+				name := m.Get("positional-arg-name")
+
+				if len(name) == 0 {
+					name = field.Name
+				}
+
+				required := -1
+				requiredMaximum := -1
+
+				sreq := m.Get("required")
+
+				if sreq != "" {
+					required = 1
+
+					rng := strings.SplitN(sreq, "-", 2)
+
+					if len(rng) > 1 {
+						if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil {
+							required = int(preq)
+						}
+
+						if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil {
+							requiredMaximum = int(preq)
+						}
+					} else {
+						if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil {
+							required = int(preq)
+						}
+					}
+				}
+
+				arg := &Arg{
+					Name:            name,
+					Description:     m.Get("description"),
+					Required:        required,
+					RequiredMaximum: requiredMaximum,
+
+					value: realval.Field(i),
+					tag:   m,
+				}
+
+				c.args = append(c.args, arg)
+
+				if len(mtag.Get("required")) != 0 {
+					c.ArgsRequired = true
+				}
+			}
+
+			return true, nil
+		}
+
+		subcommand := mtag.Get("command")
+
+		if len(subcommand) != 0 {
+			var ptrval reflect.Value
+
+			if realval.Kind() == reflect.Ptr {
+				ptrval = realval
+
+				if ptrval.IsNil() {
+					ptrval.Set(reflect.New(ptrval.Type().Elem()))
+				}
+			} else {
+				ptrval = realval.Addr()
+			}
+
+			shortDescription := mtag.Get("description")
+			longDescription := mtag.Get("long-description")
+			subcommandsOptional := mtag.Get("subcommands-optional")
+			aliases := mtag.GetMany("alias")
+
+			subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface())
+
+			if err != nil {
+				return true, err
+			}
+
+			subc.Hidden = mtag.Get("hidden") != ""
+
+			if len(subcommandsOptional) > 0 {
+				subc.SubcommandsOptional = true
+			}
+
+			if len(aliases) > 0 {
+				subc.Aliases = aliases
+			}
+
+			return true, nil
+		}
+
+		return parentg.scanSubGroupHandler(realval, sfield)
+	}
+
+	return f
+}
+
+func (c *Command) scan() error {
+	return c.scanType(c.scanSubcommandHandler(c.Group))
+}
+
+func (c *Command) eachOption(f func(*Command, *Group, *Option)) {
+	c.eachCommand(func(c *Command) {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				f(c, g, option)
+			}
+		})
+	}, true)
+}
+
+func (c *Command) eachCommand(f func(*Command), recurse bool) {
+	f(c)
+
+	for _, cc := range c.commands {
+		if recurse {
+			cc.eachCommand(f, true)
+		} else {
+			f(cc)
+		}
+	}
+}
+
+func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) {
+	c.eachGroup(func(g *Group) {
+		f(c, g)
+	})
+
+	if c.Active != nil {
+		c.Active.eachActiveGroup(f)
+	}
+}
+
+func (c *Command) addHelpGroups(showHelp func() error) {
+	if !c.hasBuiltinHelpGroup {
+		c.addHelpGroup(showHelp)
+		c.hasBuiltinHelpGroup = true
+	}
+
+	for _, cc := range c.commands {
+		cc.addHelpGroups(showHelp)
+	}
+}
+
+func (c *Command) makeLookup() lookup {
+	ret := lookup{
+		shortNames: make(map[string]*Option),
+		longNames:  make(map[string]*Option),
+		commands:   make(map[string]*Command),
+	}
+
+	parent := c.parent
+
+	var parents []*Command
+
+	for parent != nil {
+		if cmd, ok := parent.(*Command); ok {
+			parents = append(parents, cmd)
+			parent = cmd.parent
+		} else {
+			parent = nil
+		}
+	}
+
+	for i := len(parents) - 1; i >= 0; i-- {
+		parents[i].fillLookup(&ret, true)
+	}
+
+	c.fillLookup(&ret, false)
+	return ret
+}
+
+func (c *Command) fillLookup(ret *lookup, onlyOptions bool) {
+	c.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.ShortName != 0 {
+				ret.shortNames[string(option.ShortName)] = option
+			}
+
+			if len(option.LongName) > 0 {
+				ret.longNames[option.LongNameWithNamespace()] = option
+			}
+		}
+	})
+
+	if onlyOptions {
+		return
+	}
+
+	for _, subcommand := range c.commands {
+		ret.commands[subcommand.Name] = subcommand
+
+		for _, a := range subcommand.Aliases {
+			ret.commands[a] = subcommand
+		}
+	}
+}
+
+func (c *Command) groupByName(name string) *Group {
+	if grp := c.Group.groupByName(name); grp != nil {
+		return grp
+	}
+
+	for _, subc := range c.commands {
+		prefix := subc.Name + "."
+
+		if strings.HasPrefix(name, prefix) {
+			if grp := subc.groupByName(name[len(prefix):]); grp != nil {
+				return grp
+			}
+		} else if name == subc.Name {
+			return subc.Group
+		}
+	}
+
+	return nil
+}
+
+type commandList []*Command
+
+func (c commandList) Less(i, j int) bool {
+	return c[i].Name < c[j].Name
+}
+
+func (c commandList) Len() int {
+	return len(c)
+}
+
+func (c commandList) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+func (c *Command) sortedVisibleCommands() []*Command {
+	ret := commandList(c.visibleCommands())
+	sort.Sort(ret)
+
+	return []*Command(ret)
+}
+
+func (c *Command) visibleCommands() []*Command {
+	ret := make([]*Command, 0, len(c.commands))
+
+	for _, cmd := range c.commands {
+		if !cmd.Hidden {
+			ret = append(ret, cmd)
+		}
+	}
+
+	return ret
+}
+
+func (c *Command) match(name string) bool {
+	if c.Name == name {
+		return true
+	}
+
+	for _, v := range c.Aliases {
+		if v == name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (c *Command) hasCliOptions() bool {
+	ret := false
+
+	c.eachGroup(func(g *Group) {
+		if g.isBuiltinHelp {
+			return
+		}
+
+		for _, opt := range g.options {
+			if opt.canCli() {
+				ret = true
+			}
+		}
+	})
+
+	return ret
+}
+
+func (c *Command) fillParseState(s *parseState) {
+	s.positional = make([]*Arg, len(c.args))
+	copy(s.positional, c.args)
+
+	s.lookup = c.makeLookup()
+	s.command = c
+}
diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go
new file mode 100644
index 0000000..7a7a08b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/completion.go
@@ -0,0 +1,309 @@
+package flags
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Completion is a type containing information of a completion.
+type Completion struct {
+	// The completed item
+	Item string
+
+	// A description of the completed item (optional)
+	Description string
+}
+
+type completions []Completion
+
+func (c completions) Len() int {
+	return len(c)
+}
+
+func (c completions) Less(i, j int) bool {
+	return c[i].Item < c[j].Item
+}
+
+func (c completions) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+// Completer is an interface which can be implemented by types
+// to provide custom command line argument completion.
+type Completer interface {
+	// Complete receives a prefix representing a (partial) value
+	// for its type and should provide a list of possible valid
+	// completions.
+	Complete(match string) []Completion
+}
+
+type completion struct {
+	parser *Parser
+}
+
+// Filename is a string alias which provides filename completion.
+type Filename string
+
+func completionsWithoutDescriptions(items []string) []Completion {
+	ret := make([]Completion, len(items))
+
+	for i, v := range items {
+		ret[i].Item = v
+	}
+
+	return ret
+}
+
+// Complete returns a list of existing files with the given
+// prefix.
+func (f *Filename) Complete(match string) []Completion {
+	ret, _ := filepath.Glob(match + "*")
+	return completionsWithoutDescriptions(ret)
+}
+
+func (c *completion) skipPositional(s *parseState, n int) {
+	if n >= len(s.positional) {
+		s.positional = nil
+	} else {
+		s.positional = s.positional[n:]
+	}
+}
+
+func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion {
+	if short && len(match) != 0 {
+		return []Completion{
+			Completion{
+				Item: prefix + match,
+			},
+		}
+	}
+
+	var results []Completion
+	repeats := map[string]bool{}
+
+	for name, opt := range s.lookup.longNames {
+		if strings.HasPrefix(name, match) && !opt.Hidden {
+			results = append(results, Completion{
+				Item:        defaultLongOptDelimiter + name,
+				Description: opt.Description,
+			})
+
+			if short {
+				repeats[string(opt.ShortName)] = true
+			}
+		}
+	}
+
+	if short {
+		for name, opt := range s.lookup.shortNames {
+			if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden {
+				results = append(results, Completion{
+					Item:        string(defaultShortOptDelimiter) + name,
+					Description: opt.Description,
+				})
+			}
+		}
+	}
+
+	return results
+}
+
+func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, false)
+}
+
+func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, true)
+}
+
+func (c *completion) completeCommands(s *parseState, match string) []Completion {
+	n := make([]Completion, 0, len(s.command.commands))
+
+	for _, cmd := range s.command.commands {
+		if cmd.data != c && strings.HasPrefix(cmd.Name, match) {
+			n = append(n, Completion{
+				Item:        cmd.Name,
+				Description: cmd.ShortDescription,
+			})
+		}
+	}
+
+	return n
+}
+
+func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion {
+	if value.Kind() == reflect.Slice {
+		value = reflect.New(value.Type().Elem())
+	}
+	i := value.Interface()
+
+	var ret []Completion
+
+	if cmp, ok := i.(Completer); ok {
+		ret = cmp.Complete(match)
+	} else if value.CanAddr() {
+		if cmp, ok = value.Addr().Interface().(Completer); ok {
+			ret = cmp.Complete(match)
+		}
+	}
+
+	for i, v := range ret {
+		ret[i].Item = prefix + v.Item
+	}
+
+	return ret
+}
+
+func (c *completion) complete(args []string) []Completion {
+	if len(args) == 0 {
+		args = []string{""}
+	}
+
+	s := &parseState{
+		args: args,
+	}
+
+	c.parser.fillParseState(s)
+
+	var opt *Option
+
+	for len(s.args) > 1 {
+		arg := s.pop()
+
+		if (c.parser.Options&PassDoubleDash) != None && arg == "--" {
+			opt = nil
+			c.skipPositional(s, len(s.args)-1)
+
+			break
+		}
+
+		if argumentIsOption(arg) {
+			prefix, optname, islong := stripOptionPrefix(arg)
+			optname, _, argument := splitOption(prefix, optname, islong)
+
+			if argument == nil {
+				var o *Option
+				canarg := true
+
+				if islong {
+					o = s.lookup.longNames[optname]
+				} else {
+					for i, r := range optname {
+						sname := string(r)
+						o = s.lookup.shortNames[sname]
+
+						if o == nil {
+							break
+						}
+
+						if i == 0 && o.canArgument() && len(optname) != len(sname) {
+							canarg = false
+							break
+						}
+					}
+				}
+
+				if o == nil && (c.parser.Options&PassAfterNonOption) != None {
+					opt = nil
+					c.skipPositional(s, len(s.args)-1)
+
+					break
+				} else if o != nil && o.canArgument() && !o.OptionalArgument && canarg {
+					if len(s.args) > 1 {
+						s.pop()
+					} else {
+						opt = o
+					}
+				}
+			}
+		} else {
+			if len(s.positional) > 0 {
+				if !s.positional[0].isRemaining() {
+					// Don't advance beyond a remaining positional arg (because
+					// it consumes all subsequent args).
+					s.positional = s.positional[1:]
+				}
+			} else if cmd, ok := s.lookup.commands[arg]; ok {
+				cmd.fillParseState(s)
+			}
+
+			opt = nil
+		}
+	}
+
+	lastarg := s.args[len(s.args)-1]
+	var ret []Completion
+
+	if opt != nil {
+		// Completion for the argument of 'opt'
+		ret = c.completeValue(opt.value, "", lastarg)
+	} else if argumentStartsOption(lastarg) {
+		// Complete the option
+		prefix, optname, islong := stripOptionPrefix(lastarg)
+		optname, split, argument := splitOption(prefix, optname, islong)
+
+		if argument == nil && !islong {
+			rname, n := utf8.DecodeRuneInString(optname)
+			sname := string(rname)
+
+			if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() {
+				ret = c.completeValue(opt.value, prefix+sname, optname[n:])
+			} else {
+				ret = c.completeNamesForShortPrefix(s, prefix, optname)
+			}
+		} else if argument != nil {
+			if islong {
+				opt = s.lookup.longNames[optname]
+			} else {
+				opt = s.lookup.shortNames[optname]
+			}
+
+			if opt != nil {
+				ret = c.completeValue(opt.value, prefix+optname+split, *argument)
+			}
+		} else if islong {
+			ret = c.completeNamesForLongPrefix(s, prefix, optname)
+		} else {
+			ret = c.completeNamesForShortPrefix(s, prefix, optname)
+		}
+	} else if len(s.positional) > 0 {
+		// Complete for positional argument
+		ret = c.completeValue(s.positional[0].value, "", lastarg)
+	} else if len(s.command.commands) > 0 {
+		// Complete for command
+		ret = c.completeCommands(s, lastarg)
+	}
+
+	sort.Sort(completions(ret))
+	return ret
+}
+
+func (c *completion) print(items []Completion, showDescriptions bool) {
+	if showDescriptions && len(items) > 1 {
+		maxl := 0
+
+		for _, v := range items {
+			if len(v.Item) > maxl {
+				maxl = len(v.Item)
+			}
+		}
+
+		for _, v := range items {
+			fmt.Printf("%s", v.Item)
+
+			if len(v.Description) > 0 {
+				fmt.Printf("%s  # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description)
+			}
+
+			fmt.Printf("\n")
+		}
+	} else {
+		for _, v := range items {
+			fmt.Println(v.Item)
+		}
+	}
+}
diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go
new file mode 100644
index 0000000..984aac8
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/convert.go
@@ -0,0 +1,348 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// to a string representation of the flag.
+type Marshaler interface {
+	// MarshalFlag marshals a flag value to its string representation.
+	MarshalFlag() (string, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a flag
+// argument to themselves. The provided value is directly passed from the
+// command line.
+type Unmarshaler interface {
+	// UnmarshalFlag unmarshals a string value representation to the flag
+	// value (which therefore needs to be a pointer receiver).
+	UnmarshalFlag(value string) error
+}
+
+func getBase(options multiTag, base int) (int, error) {
+	sbase := options.Get("base")
+
+	var err error
+	var ivbase int64
+
+	if sbase != "" {
+		ivbase, err = strconv.ParseInt(sbase, 10, 32)
+		base = int(ivbase)
+	}
+
+	return base, err
+}
+
+func convertMarshal(val reflect.Value) (bool, string, error) {
+	// Check first for the Marshaler interface
+	if val.Type().NumMethod() > 0 && val.CanInterface() {
+		if marshaler, ok := val.Interface().(Marshaler); ok {
+			ret, err := marshaler.MarshalFlag()
+			return true, ret, err
+		}
+	}
+
+	return false, "", nil
+}
+
+func convertToString(val reflect.Value, options multiTag) (string, error) {
+	if ok, ret, err := convertMarshal(val); ok {
+		return ret, err
+	}
+
+	tp := val.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		stringer := val.Interface().(fmt.Stringer)
+		return stringer.String(), nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		return val.String(), nil
+	case reflect.Bool:
+		if val.Bool() {
+			return "true", nil
+		}
+
+		return "false", nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatInt(val.Int(), base), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatUint(val.Uint(), base), nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil
+	case reflect.Slice:
+		if val.Len() == 0 {
+			return "", nil
+		}
+
+		ret := "["
+
+		for i := 0; i < val.Len(); i++ {
+			if i != 0 {
+				ret += ", "
+			}
+
+			item, err := convertToString(val.Index(i), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += item
+		}
+
+		return ret + "]", nil
+	case reflect.Map:
+		ret := "{"
+
+		for i, key := range val.MapKeys() {
+			if i != 0 {
+				ret += ", "
+			}
+
+			keyitem, err := convertToString(key, options)
+
+			if err != nil {
+				return "", err
+			}
+
+			item, err := convertToString(val.MapIndex(key), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += keyitem + ":" + item
+		}
+
+		return ret + "}", nil
+	case reflect.Ptr:
+		return convertToString(reflect.Indirect(val), options)
+	case reflect.Interface:
+		if !val.IsNil() {
+			return convertToString(val.Elem(), options)
+		}
+	}
+
+	return "", nil
+}
+
+func convertUnmarshal(val string, retval reflect.Value) (bool, error) {
+	if retval.Type().NumMethod() > 0 && retval.CanInterface() {
+		if unmarshaler, ok := retval.Interface().(Unmarshaler); ok {
+			if retval.IsNil() {
+				retval.Set(reflect.New(retval.Type().Elem()))
+
+				// Re-assign from the new value
+				unmarshaler = retval.Interface().(Unmarshaler)
+			}
+
+			return true, unmarshaler.UnmarshalFlag(val)
+		}
+	}
+
+	if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() {
+		return convertUnmarshal(val, retval.Addr())
+	}
+
+	if retval.Type().Kind() == reflect.Interface && !retval.IsNil() {
+		return convertUnmarshal(val, retval.Elem())
+	}
+
+	return false, nil
+}
+
+func convert(val string, retval reflect.Value, options multiTag) error {
+	if ok, err := convertUnmarshal(val, retval); ok {
+		return err
+	}
+
+	tp := retval.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		parsed, err := time.ParseDuration(val)
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(int64(parsed))
+		return nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		retval.SetString(val)
+	case reflect.Bool:
+		if val == "" {
+			retval.SetBool(true)
+		} else {
+			b, err := strconv.ParseBool(val)
+
+			if err != nil {
+				return err
+			}
+
+			retval.SetBool(b)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseInt(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(parsed)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseUint(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetUint(parsed)
+	case reflect.Float32, reflect.Float64:
+		parsed, err := strconv.ParseFloat(val, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetFloat(parsed)
+	case reflect.Slice:
+		elemtp := tp.Elem()
+
+		elemvalptr := reflect.New(elemtp)
+		elemval := reflect.Indirect(elemvalptr)
+
+		if err := convert(val, elemval, options); err != nil {
+			return err
+		}
+
+		retval.Set(reflect.Append(retval, elemval))
+	case reflect.Map:
+		parts := strings.SplitN(val, ":", 2)
+
+		key := parts[0]
+		var value string
+
+		if len(parts) == 2 {
+			value = parts[1]
+		}
+
+		keytp := tp.Key()
+		keyval := reflect.New(keytp)
+
+		if err := convert(key, keyval, options); err != nil {
+			return err
+		}
+
+		valuetp := tp.Elem()
+		valueval := reflect.New(valuetp)
+
+		if err := convert(value, valueval, options); err != nil {
+			return err
+		}
+
+		if retval.IsNil() {
+			retval.Set(reflect.MakeMap(tp))
+		}
+
+		retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval))
+	case reflect.Ptr:
+		if retval.IsNil() {
+			retval.Set(reflect.New(retval.Type().Elem()))
+		}
+
+		return convert(val, reflect.Indirect(retval), options)
+	case reflect.Interface:
+		if !retval.IsNil() {
+			return convert(val, retval.Elem(), options)
+		}
+	}
+
+	return nil
+}
+
+func isPrint(s string) bool {
+	for _, c := range s {
+		if !strconv.IsPrint(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func quoteIfNeeded(s string) string {
+	if !isPrint(s) {
+		return strconv.Quote(s)
+	}
+
+	return s
+}
+
+func quoteIfNeededV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = quoteIfNeeded(v)
+	}
+
+	return ret
+}
+
+func quoteV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = strconv.Quote(v)
+	}
+
+	return ret
+}
+
+func unquoteIfPossible(s string) (string, error) {
+	if len(s) == 0 || s[0] != '"' {
+		return s, nil
+	}
+
+	return strconv.Unquote(s)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go
new file mode 100644
index 0000000..05528d8
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/error.go
@@ -0,0 +1,134 @@
+package flags
+
+import (
+	"fmt"
+)
+
+// ErrorType represents the type of error.
+type ErrorType uint
+
+const (
+	// ErrUnknown indicates a generic error.
+	ErrUnknown ErrorType = iota
+
+	// ErrExpectedArgument indicates that an argument was expected.
+	ErrExpectedArgument
+
+	// ErrUnknownFlag indicates an unknown flag.
+	ErrUnknownFlag
+
+	// ErrUnknownGroup indicates an unknown group.
+	ErrUnknownGroup
+
+	// ErrMarshal indicates a marshalling error while converting values.
+	ErrMarshal
+
+	// ErrHelp indicates that the built-in help was shown (the error
+	// contains the help message).
+	ErrHelp
+
+	// ErrNoArgumentForBool indicates that an argument was given for a
+	// boolean flag (which does not take any arguments).
+	ErrNoArgumentForBool
+
+	// ErrRequired indicates that a required flag was not provided.
+	ErrRequired
+
+	// ErrShortNameTooLong indicates that a short flag name was specified,
+	// longer than one character.
+	ErrShortNameTooLong
+
+	// ErrDuplicatedFlag indicates that a short or long flag has been
+	// defined more than once.
+	ErrDuplicatedFlag
+
+	// ErrTag indicates an error while parsing flag tags.
+	ErrTag
+
+	// ErrCommandRequired indicates that a command was required but not
+	// specified.
+	ErrCommandRequired
+
+	// ErrUnknownCommand indicates that an unknown command was specified.
+	ErrUnknownCommand
+
+	// ErrInvalidChoice indicates an invalid value for an option which only
+	// allows a certain set of choices.
+	ErrInvalidChoice
+
+	// ErrInvalidTag indicates an invalid tag or invalid use of an existing tag.
+	ErrInvalidTag
+)
+
+func (e ErrorType) String() string {
+	switch e {
+	case ErrUnknown:
+		return "unknown"
+	case ErrExpectedArgument:
+		return "expected argument"
+	case ErrUnknownFlag:
+		return "unknown flag"
+	case ErrUnknownGroup:
+		return "unknown group"
+	case ErrMarshal:
+		return "marshal"
+	case ErrHelp:
+		return "help"
+	case ErrNoArgumentForBool:
+		return "no argument for bool"
+	case ErrRequired:
+		return "required"
+	case ErrShortNameTooLong:
+		return "short name too long"
+	case ErrDuplicatedFlag:
+		return "duplicated flag"
+	case ErrTag:
+		return "tag"
+	case ErrCommandRequired:
+		return "command required"
+	case ErrUnknownCommand:
+		return "unknown command"
+	case ErrInvalidChoice:
+		return "invalid choice"
+	case ErrInvalidTag:
+		return "invalid tag"
+	}
+
+	return "unrecognized error type"
+}
+
+// Error represents a parser error. The error returned from Parse is of this
+// type. The error contains both a Type and Message.
+type Error struct {
+	// The type of error
+	Type ErrorType
+
+	// The error message
+	Message string
+}
+
+// Error returns the error's message
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func newError(tp ErrorType, message string) *Error {
+	return &Error{
+		Type:    tp,
+		Message: message,
+	}
+}
+
+func newErrorf(tp ErrorType, format string, args ...interface{}) *Error {
+	return newError(tp, fmt.Sprintf(format, args...))
+}
+
+func wrapError(err error) *Error {
+	ret, ok := err.(*Error)
+
+	if !ok {
+		return newError(ErrUnknown, err.Error())
+	}
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go
new file mode 100644
index 0000000..889762d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/flags.go
@@ -0,0 +1,258 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go built-in flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+
+Supported features
+
+The following features are supported in go-flags:
+
+    Options with short names (-v)
+    Options with long names (--verbose)
+    Options with and without arguments (bool vs. other types)
+    Options with optional arguments and default values
+    Option default values from ENVIRONMENT_VARIABLES, including slice and map values
+    Multiple option groups each containing a set of options
+    Generate and print well-formatted help message
+    Passing remaining command line arguments after -- (optional)
+    Ignoring unknown command line options (optional)
+    Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+    Supports multiple short options -aux
+    Supports all primitive Go types (string, int{8..64}, uint{8..64}, float)
+    Supports the same option multiple times (stored in a slice, or only the last counts)
+    Supports maps
+    Supports function callbacks
+    Supports namespaces for (nested) option groups
+
+Additional features specific to Windows:
+    Options with short names (/v)
+    Options with long names (/verbose)
+    Windows-style options with arguments use a colon as the delimiter
+    Modify generated help message with Windows-style / options
+    Windows-style options can be disabled at build time using the "forceposix"
+    build tag
+
+
+Basic usage
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+    type Options struct {
+        Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+    }
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. For example, when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Slice options work exactly the same as primitive type options, except that
+whenever the option is encountered, a value is appended to the slice.
+
+Map options from string to primitive type are also supported. On the command
+line, you specify the value for such an option as key:value. For example
+
+    type Options struct {
+        AuthorInfo map[string]string `short:"a"`
+    }
+
+Then, the AuthorInfo map can be filled with something like
+-a name:Jesse -a "surname:van den Kieboom".
+
+Finally, for full control over the conversion between command line argument
+values and options, user defined types can choose to implement the Marshaler
+and Unmarshaler interfaces.
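+
+As a rough sketch (the type, values, and error text below are only
+illustrative), a custom type could implement these interfaces as follows:
+
+    type Level uint
+
+    func (l *Level) UnmarshalFlag(value string) error {
+        switch value {
+        case "info":
+            *l = 0
+        case "debug":
+            *l = 1
+        default:
+            return fmt.Errorf("unknown level `%s'", value)
+        }
+
+        return nil
+    }
+
+    func (l Level) MarshalFlag() (string, error) {
+        if l == 1 {
+            return "debug", nil
+        }
+
+        return "info", nil
+    }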
+
+
+Available field tags
+
+The following is a list of tags for struct fields supported by go-flags:
+
+    short:            the short name of the option (single character)
+    long:             the long name of the option
+    required:         if non-empty, makes the option required to appear on the command
+                      line. If a required option is not present, the parser will
+                      return ErrRequired (optional)
+    description:      the description of the option (optional)
+    long-description: the long description of the option. Currently only
+                      displayed in generated man pages (optional)
+    no-flag:          if non-empty, this field is ignored as an option (optional)
+
+    optional:       if non-empty, makes the argument of the option optional. When an
+                    argument is optional it can only be specified using
+                    --option=argument (optional)
+    optional-value: the value of an optional option when the option occurs
+                    without an argument. This tag can be specified multiple
+                    times in the case of maps or slices (optional)
+    default:        the default value of an option. This tag can be specified
+                    multiple times in the case of slices or maps (optional)
+    default-mask:   when specified, this value will be displayed in the help
+                    instead of the actual default value. This is useful
+                    mostly for hiding otherwise sensitive information from
+                    showing up in the help. If default-mask takes the special
+                    value "-", then no default value will be shown at all
+                    (optional)
+    env:            the default value of the option is overridden from the
+                    specified environment variable, if one has been defined.
+                    (optional)
+    env-delim:      the 'env' default value from environment is split into
+                    multiple values with the given delimiter string, use with
+                    slices and maps (optional)
+    value-name:     the name of the argument value (to be shown in the help)
+                    (optional)
+    choice:         limits the values for an option to a set of values.
+                    This tag can be specified multiple times (optional)
+    hidden:         if non-empty, the option is not visible in the help or man page.
+
+    base: a base (radix) used to convert strings to integer values, the
+          default base is 10 (i.e. decimal) (optional)
+
+    ini-name:       the explicit ini option name (optional)
+    no-ini:         if non-empty this field is ignored as an ini option
+                    (optional)
+
+    group:                when specified on a struct field, makes the struct
+                          field a separate group with the given name (optional)
+    namespace:            when specified on a group struct field, the namespace
+                          gets prepended to every option's long name and
+                          subgroup's namespace of this group, separated by
+                          the parser's namespace delimiter (optional)
+    command:              when specified on a struct field, makes the struct
+                          field a (sub)command with the given name (optional)
+    subcommands-optional: when specified on a command struct field, makes
+                          any subcommands of that command optional (optional)
+    alias:                when specified on a command struct field, adds the
+                          specified name as an alias for the command. Can be
+                          specified multiple times to add more than one
+                          alias (optional)
+    positional-args:      when specified on a field with a struct type,
+                          uses the fields of that struct to parse remaining
+                          positional command line arguments into (in order
+                          of the fields). If a field has a slice type,
+                          then all remaining arguments will be added to it.
+                          Positional arguments are optional by default,
+                          unless the "required" tag is specified together
+                          with the "positional-args" tag. The "required" tag
+                          can also be set on the individual rest argument
+                          fields, to require only the first N positional
+                          arguments. If the "required" tag is set on the
+                          rest arguments slice, then its value determines
+                          the minimum amount of rest arguments that needs to
+                          be provided (e.g. `required:"2"`) (optional)
+    positional-arg-name:  used on a field in a positional argument struct; name
+                          of the positional argument placeholder to be shown in
+                          the help (optional)
+
+Either the `short:` or the `long:` tag must be specified to make the field
+eligible as an option.
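+
+As a purely illustrative sketch of the positional-args tag described above
+(all field names are made up):
+
+    type Options struct {
+        Positional struct {
+            Source      string
+            Destination string
+            Rest        []string
+        } `positional-args:"yes" required:"yes"`
+    }
+
+Here Source and Destination receive the first two positional arguments and
+Rest collects any arguments that remain.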
+
+
+Option groups
+
+Option groups are a simple way to semantically separate your options. All
+options in a particular group are shown together in the help under the name
+of the group. Namespaces can be used to specify option long names more
+precisely and emphasize the options' affiliation to their group.
+
+There are currently three ways to specify option groups.
+
+    1. Use NewNamedParser specifying the various option groups.
+    2. Use AddGroup to add a group to an existing parser.
+    3. Add a struct field to the top-level options annotated with the
+       group:"group-name" tag (a minimal sketch of this approach follows).
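+
+A minimal sketch of the third approach (the group, namespace, and field names
+are only illustrative):
+
+    type ServerOptions struct {
+        Host string `long:"host" description:"Host to listen on"`
+        Port int    `long:"port" description:"Port to listen on"`
+    }
+
+    type Options struct {
+        Server ServerOptions `group:"Server Options" namespace:"server"`
+    }
+
+The options of the nested group are then shown under "Server Options" in the
+help, with the namespace prepended to their long names as described for the
+namespace tag above.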
+
+
+
+Commands
+
+The flags package also has basic support for commands. Commands are often
+used in monolithic applications that support various commands or actions.
+Take git for example: add, commit, checkout, etc. are all commands. Using
+commands you can easily separate multiple functions of your
+application.
+
+There are currently two ways to specify a command.
+
+    1. Use AddCommand on an existing parser.
+    2. Add a struct field to your options struct annotated with the
+       command:"command-name" tag.
+
+The most common, idiomatic way to implement commands is to define a global
+parser instance and implement each command in a separate file. These
+command files should define a Go init function which calls AddCommand on
+the global parser.
+
+When parsing ends and there is an active command and that command implements
+the Commander interface, then its Execute method will be run with the
+remaining command line arguments.
+
+Command structs can have options which become valid to parse after the
+command has been specified on the command line, in addition to the options
+of all the parent commands. For example, considering a -v flag on the parser
+and an add command, the following are equivalent:
+
+    ./app -v add
+    ./app add -v
+
+However, if the -v flag is defined on the add command, then the first of
+the two examples above would fail since the -v flag is not defined before
+the add command.
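+
+As a purely illustrative sketch, an add command with one option of its own and
+an Execute method could be declared as:
+
+    type AddCommand struct {
+        All bool `short:"a" long:"all" description:"Add all files"`
+    }
+
+    func (c *AddCommand) Execute(args []string) error {
+        // args contains the remaining positional command line arguments
+        return nil
+    }
+
+    type Options struct {
+        Verbose []bool     `short:"v" long:"verbose" description:"Show verbose debug information"`
+        Add     AddCommand `command:"add"`
+    }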
+
+
+Completion
+
+go-flags has builtin support to provide bash completion of flags, commands
+and argument values. To use completion, the binary which uses go-flags
+can be invoked in a special environment to list completion of the current
+command line argument. It should be noted that this `executes` your application,
+and it is up to the user to make sure there are no negative side effects (for
+example from init functions).
+
+Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion
+by replacing the argument parsing routine with the completion routine which
+outputs completions for the passed arguments. The basic invocation to
+complete a set of arguments is therefore:
+
+    GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3
+
+where `completion-example` is the binary, `arg1` and `arg2` are
+the current arguments, and `arg3` (the last argument) is the argument
+to be completed. If the GO_FLAGS_COMPLETION is set to "verbose", then
+descriptions of possible completion items will also be shown, if there
+are more than 1 completion items.
+
+To use this with bash completion, a simple file can be written which
+calls the binary which supports go-flags completion:
+
+    _completion_example() {
+        # All arguments except the first one
+        args=("${COMP_WORDS[@]:1:$COMP_CWORD}")
+
+        # Only split on newlines
+        local IFS=$'\n'
+
+        # Call completion (note that the first element of COMP_WORDS is
+        # the executable itself)
+        COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}"))
+        return 0
+    }
+
+    complete -F _completion_example completion-example
+
+Completion requires the parser option PassDoubleDash and is therefore
+enforced if the environment variable GO_FLAGS_COMPLETION is set.
+
+Customized completion for argument values is supported by implementing
+the flags.Completer interface for the argument value type. An example
+of a type which does so is the flags.Filename type, an alias of string
+allowing simple filename completion. A slice or array argument value
+whose element type implements flags.Completer will also be completed.
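+
+A minimal sketch of such an implementation (the type and the completion
+candidates are only illustrative):
+
+    type Fruit string
+
+    func (f *Fruit) Complete(match string) []flags.Completion {
+        var ret []flags.Completion
+
+        for _, fruit := range []string{"apple", "banana", "orange"} {
+            if strings.HasPrefix(fruit, match) {
+                ret = append(ret, flags.Completion{Item: fruit})
+            }
+        }
+
+        return ret
+    }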
+*/
+package flags
diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go
new file mode 100644
index 0000000..9e057ab
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/group.go
@@ -0,0 +1,406 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// ErrNotPointerToStruct indicates that a provided data container is not
+// a pointer to a struct. Only pointers to structs are valid data containers
+// for options.
+var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct")
+
+// Group represents an option group. Option groups can be used to logically
+// group options together under a description. Groups are only used to provide
+// more structure to options both for the user (as displayed in the help message)
+// and for you, since groups can be nested.
+type Group struct {
+	// A short description of the group. The
+	// short description is primarily used in the built-in generated help
+	// message
+	ShortDescription string
+
+	// A long description of the group. The long
+	// description is primarily used to present information on commands
+	// (Command embeds Group) in the built-in generated help and man pages.
+	LongDescription string
+
+	// The namespace of the group
+	Namespace string
+
+	// If true, the group is not displayed in the help or man page
+	Hidden bool
+
+	// The parent of the group or nil if it has no parent
+	parent interface{}
+
+	// All the options in the group
+	options []*Option
+
+	// All the subgroups
+	groups []*Group
+
+	// Whether the group represents the built-in help group
+	isBuiltinHelp bool
+
+	data interface{}
+}
+
+type scanHandler func(reflect.Value, *reflect.StructField) (bool, error)
+
+// AddGroup adds a new subgroup to this group with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = g
+
+	if err := group.scan(); err != nil {
+		return nil, err
+	}
+
+	g.groups = append(g.groups, group)
+	return group, nil
+}
+
+// Groups returns the list of groups embedded in this group.
+func (g *Group) Groups() []*Group {
+	return g.groups
+}
+
+// Options returns the list of options in this group.
+func (g *Group) Options() []*Option {
+	return g.options
+}
+
+// Find locates the subgroup with the given short description and returns it.
+// If no such group can be found Find will return nil. Note that the description
+// is matched case insensitively.
+func (g *Group) Find(shortDescription string) *Group {
+	lshortDescription := strings.ToLower(shortDescription)
+
+	var ret *Group
+
+	g.eachGroup(func(gg *Group) {
+		if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription {
+			ret = gg
+		}
+	})
+
+	return ret
+}
+
+func (g *Group) findOption(matcher func(*Option) bool) (option *Option) {
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if option == nil && matcher(opt) {
+				option = opt
+			}
+		}
+	})
+
+	return option
+}
+
+// FindOptionByLongName finds an option that is part of the group, or any of its
+// subgroups, by matching its long name (including the option namespace).
+func (g *Group) FindOptionByLongName(longName string) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.LongNameWithNamespace() == longName
+	})
+}
+
+// FindOptionByShortName finds an option that is part of the group, or any of
+// its subgroups, by matching its short name.
+func (g *Group) FindOptionByShortName(shortName rune) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.ShortName == shortName
+	})
+}
+
+func newGroup(shortDescription string, longDescription string, data interface{}) *Group {
+	return &Group{
+		ShortDescription: shortDescription,
+		LongDescription:  longDescription,
+
+		data: data,
+	}
+}
+
+func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option {
+	prio := 0
+	var retopt *Option
+
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if namematch != nil && namematch(opt, name) && prio < 4 {
+				retopt = opt
+				prio = 4
+			}
+
+			if name == opt.field.Name && prio < 3 {
+				retopt = opt
+				prio = 3
+			}
+
+			if name == opt.LongNameWithNamespace() && prio < 2 {
+				retopt = opt
+				prio = 2
+			}
+
+			if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 {
+				retopt = opt
+				prio = 1
+			}
+		}
+	})
+
+	return retopt
+}
+
+func (g *Group) eachGroup(f func(*Group)) {
+	f(g)
+
+	for _, gg := range g.groups {
+		gg.eachGroup(f)
+	}
+}
+
+func isStringFalsy(s string) bool {
+	return s == "" || s == "false" || s == "no" || s == "0"
+}
+
+func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error {
+	stype := realval.Type()
+
+	if sfield != nil {
+		if ok, err := handler(realval, sfield); err != nil {
+			return err
+		} else if ok {
+			return nil
+		}
+	}
+
+	for i := 0; i < stype.NumField(); i++ {
+		field := stype.Field(i)
+
+		// PkgPath is set only for non-exported fields, which we ignore
+		if field.PkgPath != "" && !field.Anonymous {
+			continue
+		}
+
+		mtag := newMultiTag(string(field.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return err
+		}
+
+		// Skip fields with the no-flag tag
+		if mtag.Get("no-flag") != "" {
+			continue
+		}
+
+		// Dive deep into structs or pointers to structs
+		kind := field.Type.Kind()
+		fld := realval.Field(i)
+
+		if kind == reflect.Struct {
+			if err := g.scanStruct(fld, &field, handler); err != nil {
+				return err
+			}
+		} else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
+			flagCountBefore := len(g.options) + len(g.groups)
+
+			if fld.IsNil() {
+				fld = reflect.New(fld.Type().Elem())
+			}
+
+			if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil {
+				return err
+			}
+
+			if len(g.options)+len(g.groups) != flagCountBefore {
+				realval.Field(i).Set(fld)
+			}
+		}
+
+		longname := mtag.Get("long")
+		shortname := mtag.Get("short")
+
+		// Need at least either a short or long name
+		if longname == "" && shortname == "" && mtag.Get("ini-name") == "" {
+			continue
+		}
+
+		short := rune(0)
+		rc := utf8.RuneCountInString(shortname)
+
+		if rc > 1 {
+			return newErrorf(ErrShortNameTooLong,
+				"short names can only be 1 character long, not `%s'",
+				shortname)
+
+		} else if rc == 1 {
+			short, _ = utf8.DecodeRuneInString(shortname)
+		}
+
+		description := mtag.Get("description")
+		def := mtag.GetMany("default")
+
+		optionalValue := mtag.GetMany("optional-value")
+		valueName := mtag.Get("value-name")
+		defaultMask := mtag.Get("default-mask")
+
+		optional := !isStringFalsy(mtag.Get("optional"))
+		required := !isStringFalsy(mtag.Get("required"))
+		choices := mtag.GetMany("choice")
+		hidden := !isStringFalsy(mtag.Get("hidden"))
+
+		option := &Option{
+			Description:      description,
+			ShortName:        short,
+			LongName:         longname,
+			Default:          def,
+			EnvDefaultKey:    mtag.Get("env"),
+			EnvDefaultDelim:  mtag.Get("env-delim"),
+			OptionalArgument: optional,
+			OptionalValue:    optionalValue,
+			Required:         required,
+			ValueName:        valueName,
+			DefaultMask:      defaultMask,
+			Choices:          choices,
+			Hidden:           hidden,
+
+			group: g,
+
+			field: field,
+			value: realval.Field(i),
+			tag:   mtag,
+		}
+
+		if option.isBool() && option.Default != nil {
+			return newErrorf(ErrInvalidTag,
+				"boolean flag `%s' may not have default values, they always default to `false' and can only be turned on",
+				option.shortAndLongName())
+		}
+
+		g.options = append(g.options, option)
+	}
+
+	return nil
+}
+
+func (g *Group) checkForDuplicateFlags() *Error {
+	shortNames := make(map[rune]*Option)
+	longNames := make(map[string]*Option)
+
+	var duplicateError *Error
+
+	g.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.LongName != "" {
+				longName := option.LongNameWithNamespace()
+
+				if otherOption, ok := longNames[longName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption)
+					return
+				}
+				longNames[longName] = option
+			}
+			if option.ShortName != 0 {
+				if otherOption, ok := shortNames[option.ShortName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption)
+					return
+				}
+				shortNames[option.ShortName] = option
+			}
+		}
+	})
+
+	return duplicateError
+}
+
+func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+	mtag := newMultiTag(string(sfield.Tag))
+
+	if err := mtag.Parse(); err != nil {
+		return true, err
+	}
+
+	subgroup := mtag.Get("group")
+
+	if len(subgroup) != 0 {
+		var ptrval reflect.Value
+
+		if realval.Kind() == reflect.Ptr {
+			ptrval = realval
+
+			if ptrval.IsNil() {
+				ptrval.Set(reflect.New(ptrval.Type()))
+			}
+		} else {
+			ptrval = realval.Addr()
+		}
+
+		description := mtag.Get("description")
+
+		group, err := g.AddGroup(subgroup, description, ptrval.Interface())
+
+		if err != nil {
+			return true, err
+		}
+
+		group.Namespace = mtag.Get("namespace")
+		group.Hidden = mtag.Get("hidden") != ""
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (g *Group) scanType(handler scanHandler) error {
+	// Get all the public fields in the data struct
+	ptrval := reflect.ValueOf(g.data)
+
+	if ptrval.Type().Kind() != reflect.Ptr {
+		panic(ErrNotPointerToStruct)
+	}
+
+	stype := ptrval.Type().Elem()
+
+	if stype.Kind() != reflect.Struct {
+		panic(ErrNotPointerToStruct)
+	}
+
+	realval := reflect.Indirect(ptrval)
+
+	if err := g.scanStruct(realval, nil, handler); err != nil {
+		return err
+	}
+
+	if err := g.checkForDuplicateFlags(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (g *Group) scan() error {
+	return g.scanType(g.scanSubGroupHandler)
+}
+
+func (g *Group) groupByName(name string) *Group {
+	if len(name) == 0 {
+		return g
+	}
+
+	return g.Find(name)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go
new file mode 100644
index 0000000..d380305
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/help.go
@@ -0,0 +1,491 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"unicode/utf8"
+)
+
+type alignmentInfo struct {
+	maxLongLen      int
+	hasShort        bool
+	hasValueName    bool
+	terminalColumns int
+	indent          bool
+}
+
+const (
+	paddingBeforeOption                 = 2
+	distanceBetweenOptionAndDescription = 2
+)
+
+func (a *alignmentInfo) descriptionStart() int {
+	ret := a.maxLongLen + distanceBetweenOptionAndDescription
+
+	if a.hasShort {
+		ret += 2
+	}
+
+	if a.maxLongLen > 0 {
+		ret += 4
+	}
+
+	if a.hasValueName {
+		ret += 3
+	}
+
+	return ret
+}
+
+func (a *alignmentInfo) updateLen(name string, indent bool) {
+	l := utf8.RuneCountInString(name)
+
+	if indent {
+		l = l + 4
+	}
+
+	if l > a.maxLongLen {
+		a.maxLongLen = l
+	}
+}
+
+func (p *Parser) getAlignmentInfo() alignmentInfo {
+	ret := alignmentInfo{
+		maxLongLen:      0,
+		hasShort:        false,
+		hasValueName:    false,
+		terminalColumns: getTerminalColumns(),
+	}
+
+	if ret.terminalColumns <= 0 {
+		ret.terminalColumns = 80
+	}
+
+	var prevcmd *Command
+
+	p.eachActiveGroup(func(c *Command, grp *Group) {
+		if c != prevcmd {
+			for _, arg := range c.args {
+				ret.updateLen(arg.Name, c != p.Command)
+			}
+		}
+
+		for _, info := range grp.options {
+			if !info.canCli() {
+				continue
+			}
+
+			if info.ShortName != 0 {
+				ret.hasShort = true
+			}
+
+			if len(info.ValueName) > 0 {
+				ret.hasValueName = true
+			}
+
+			l := info.LongNameWithNamespace() + info.ValueName
+
+			if len(info.Choices) != 0 {
+				l += "[" + strings.Join(info.Choices, "|") + "]"
+			}
+
+			ret.updateLen(l, c != p.Command)
+		}
+	})
+
+	return ret
+}
+
+func wrapText(s string, l int, prefix string) string {
+	var ret string
+
+	if l < 10 {
+		l = 10
+	}
+
+	// Basic text wrapping of s at spaces to fit in l
+	lines := strings.Split(s, "\n")
+
+	for _, line := range lines {
+		var retline string
+
+		line = strings.TrimSpace(line)
+
+		for len(line) > l {
+			// Try to split on space
+			suffix := ""
+
+			pos := strings.LastIndex(line[:l], " ")
+
+			if pos < 0 {
+				pos = l - 1
+				suffix = "-\n"
+			}
+
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += strings.TrimSpace(line[:pos]) + suffix
+			line = strings.TrimSpace(line[pos:])
+		}
+
+		if len(line) > 0 {
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += line
+		}
+
+		if len(ret) > 0 {
+			ret += "\n"
+
+			if len(retline) > 0 {
+				ret += prefix
+			}
+		}
+
+		ret += retline
+	}
+
+	return ret
+}
+
+func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) {
+	line := &bytes.Buffer{}
+
+	prefix := paddingBeforeOption
+
+	if info.indent {
+		prefix += 4
+	}
+
+	if option.Hidden {
+		return
+	}
+
+	line.WriteString(strings.Repeat(" ", prefix))
+
+	if option.ShortName != 0 {
+		line.WriteRune(defaultShortOptDelimiter)
+		line.WriteRune(option.ShortName)
+	} else if info.hasShort {
+		line.WriteString("  ")
+	}
+
+	descstart := info.descriptionStart() + paddingBeforeOption
+
+	if len(option.LongName) > 0 {
+		if option.ShortName != 0 {
+			line.WriteString(", ")
+		} else if info.hasShort {
+			line.WriteString("  ")
+		}
+
+		line.WriteString(defaultLongOptDelimiter)
+		line.WriteString(option.LongNameWithNamespace())
+	}
+
+	if option.canArgument() {
+		line.WriteRune(defaultNameArgDelimiter)
+
+		if len(option.ValueName) > 0 {
+			line.WriteString(option.ValueName)
+		}
+
+		if len(option.Choices) > 0 {
+			line.WriteString("[" + strings.Join(option.Choices, "|") + "]")
+		}
+	}
+
+	written := line.Len()
+	line.WriteTo(writer)
+
+	if option.Description != "" {
+		dw := descstart - written
+		writer.WriteString(strings.Repeat(" ", dw))
+
+		var def string
+
+		if len(option.DefaultMask) != 0 {
+			if option.DefaultMask != "-" {
+				def = option.DefaultMask
+			}
+		} else {
+			def = option.defaultLiteral
+		}
+
+		var envDef string
+		if option.EnvDefaultKey != "" {
+			var envPrintable string
+			if runtime.GOOS == "windows" {
+				envPrintable = "%" + option.EnvDefaultKey + "%"
+			} else {
+				envPrintable = "$" + option.EnvDefaultKey
+			}
+			envDef = fmt.Sprintf(" [%s]", envPrintable)
+		}
+
+		var desc string
+
+		if def != "" {
+			desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef)
+		} else {
+			desc = option.Description + envDef
+		}
+
+		writer.WriteString(wrapText(desc,
+			info.terminalColumns-descstart,
+			strings.Repeat(" ", descstart)))
+	}
+
+	writer.WriteString("\n")
+}
+
+func maxCommandLength(s []*Command) int {
+	if len(s) == 0 {
+		return 0
+	}
+
+	ret := len(s[0].Name)
+
+	for _, v := range s[1:] {
+		l := len(v.Name)
+
+		if l > ret {
+			ret = l
+		}
+	}
+
+	return ret
+}
+
+// WriteHelp writes a help message containing all the possible options and
+// their descriptions to the provided writer. Note that the HelpFlag parser
+// option provides a convenient way to add a -h/--help option group to the
+// command line parser which will automatically show the help messages using
+// this method.
+func (p *Parser) WriteHelp(writer io.Writer) {
+	if writer == nil {
+		return
+	}
+
+	wr := bufio.NewWriter(writer)
+	aligninfo := p.getAlignmentInfo()
+
+	cmd := p.Command
+
+	for cmd.Active != nil {
+		cmd = cmd.Active
+	}
+
+	if p.Name != "" {
+		wr.WriteString("Usage:\n")
+		wr.WriteString(" ")
+
+		allcmd := p.Command
+
+		for allcmd != nil {
+			var usage string
+
+			if allcmd == p.Command {
+				if len(p.Usage) != 0 {
+					usage = p.Usage
+				} else if p.Options&HelpFlag != 0 {
+					usage = "[OPTIONS]"
+				}
+			} else if us, ok := allcmd.data.(Usage); ok {
+				usage = us.Usage()
+			} else if allcmd.hasCliOptions() {
+				usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name)
+			}
+
+			if len(usage) != 0 {
+				fmt.Fprintf(wr, " %s %s", allcmd.Name, usage)
+			} else {
+				fmt.Fprintf(wr, " %s", allcmd.Name)
+			}
+
+			if len(allcmd.args) > 0 {
+				fmt.Fprintf(wr, " ")
+			}
+
+			for i, arg := range allcmd.args {
+				if i != 0 {
+					fmt.Fprintf(wr, " ")
+				}
+
+				name := arg.Name
+
+				if arg.isRemaining() {
+					name = name + "..."
+				}
+
+				if !allcmd.ArgsRequired {
+					fmt.Fprintf(wr, "[%s]", name)
+				} else {
+					fmt.Fprintf(wr, "%s", name)
+				}
+			}
+
+			if allcmd.Active == nil && len(allcmd.commands) > 0 {
+				var co, cc string
+
+				if allcmd.SubcommandsOptional {
+					co, cc = "[", "]"
+				} else {
+					co, cc = "<", ">"
+				}
+
+				visibleCommands := allcmd.visibleCommands()
+
+				if len(visibleCommands) > 3 {
+					fmt.Fprintf(wr, " %scommand%s", co, cc)
+				} else {
+					subcommands := allcmd.sortedVisibleCommands()
+					names := make([]string, len(subcommands))
+
+					for i, subc := range subcommands {
+						names[i] = subc.Name
+					}
+
+					fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc)
+				}
+			}
+
+			allcmd = allcmd.Active
+		}
+
+		fmt.Fprintln(wr)
+
+		if len(cmd.LongDescription) != 0 {
+			fmt.Fprintln(wr)
+
+			t := wrapText(cmd.LongDescription,
+				aligninfo.terminalColumns,
+				"")
+
+			fmt.Fprintln(wr, t)
+		}
+	}
+
+	c := p.Command
+
+	for c != nil {
+		printcmd := c != p.Command
+
+		c.eachGroup(func(grp *Group) {
+			first := true
+
+			// Skip built-in help group for all commands except the top-level
+			// parser
+			if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) {
+				return
+			}
+
+			for _, info := range grp.options {
+				if !info.canCli() || info.Hidden {
+					continue
+				}
+
+				if printcmd {
+					fmt.Fprintf(wr, "\n[%s command options]\n", c.Name)
+					aligninfo.indent = true
+					printcmd = false
+				}
+
+				if first && cmd.Group != grp {
+					fmt.Fprintln(wr)
+
+					if aligninfo.indent {
+						wr.WriteString("    ")
+					}
+
+					fmt.Fprintf(wr, "%s:\n", grp.ShortDescription)
+					first = false
+				}
+
+				p.writeHelpOption(wr, info, aligninfo)
+			}
+		})
+
+		var args []*Arg
+		for _, arg := range c.args {
+			if arg.Description != "" {
+				args = append(args, arg)
+			}
+		}
+
+		if len(args) > 0 {
+			if c == p.Command {
+				fmt.Fprintf(wr, "\nArguments:\n")
+			} else {
+				fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name)
+			}
+
+			descStart := aligninfo.descriptionStart() + paddingBeforeOption
+
+			for _, arg := range args {
+				argPrefix := strings.Repeat(" ", paddingBeforeOption)
+				argPrefix += arg.Name
+
+				if len(arg.Description) > 0 {
+					argPrefix += ":"
+					wr.WriteString(argPrefix)
+
+					// Space between "arg:" and the description start
+					descPadding := strings.Repeat(" ", descStart-len(argPrefix))
+					// How much space the description gets before wrapping
+					descWidth := aligninfo.terminalColumns - 1 - descStart
+					// Whitespace to which we can indent new description lines
+					descPrefix := strings.Repeat(" ", descStart)
+
+					wr.WriteString(descPadding)
+					wr.WriteString(wrapText(arg.Description, descWidth, descPrefix))
+				} else {
+					wr.WriteString(argPrefix)
+				}
+
+				fmt.Fprintln(wr)
+			}
+		}
+
+		c = c.Active
+	}
+
+	scommands := cmd.sortedVisibleCommands()
+
+	if len(scommands) > 0 {
+		maxnamelen := maxCommandLength(scommands)
+
+		fmt.Fprintln(wr)
+		fmt.Fprintln(wr, "Available commands:")
+
+		for _, c := range scommands {
+			fmt.Fprintf(wr, "  %s", c.Name)
+
+			if len(c.ShortDescription) > 0 {
+				pad := strings.Repeat(" ", maxnamelen-len(c.Name))
+				fmt.Fprintf(wr, "%s  %s", pad, c.ShortDescription)
+
+				if len(c.Aliases) > 0 {
+					fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", "))
+				}
+
+			}
+
+			fmt.Fprintln(wr)
+		}
+	}
+
+	wr.Flush()
+}
diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go
new file mode 100644
index 0000000..e714d3d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/ini.go
@@ -0,0 +1,597 @@
+package flags
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// IniError contains location information on where an error occurred.
+type IniError struct {
+	// The error message.
+	Message string
+
+	// The filename of the file in which the error occurred.
+	File string
+
+	// The line number at which the error occurred.
+	LineNumber uint
+}
+
+// Error provides a "file:line: message" formatted message of the ini error.
+func (x *IniError) Error() string {
+	return fmt.Sprintf(
+		"%s:%d: %s",
+		x.File,
+		x.LineNumber,
+		x.Message,
+	)
+}
+
+// IniOptions is a set of options used when writing flags in ini format.
+type IniOptions uint
+
+const (
+	// IniNone indicates no options.
+	IniNone IniOptions = 0
+
+	// IniIncludeDefaults indicates that default values should be written.
+	IniIncludeDefaults = 1 << iota
+
+	// IniCommentDefaults indicates that if IniIncludeDefaults is used
+	// options with default values are written but commented out.
+	IniCommentDefaults
+
+	// IniIncludeComments indicates that comments containing the description
+	// of an option should be written.
+	IniIncludeComments
+
+	// IniDefault provides a default set of options.
+	IniDefault = IniIncludeComments
+)
+
+// IniParser is a utility to read and write flags options from and to ini
+// formatted strings.
+type IniParser struct {
+	ParseAsDefaults bool // override default flags
+
+	parser *Parser
+}
+
+type iniValue struct {
+	Name       string
+	Value      string
+	Quoted     bool
+	LineNumber uint
+}
+
+type iniSection []iniValue
+
+type ini struct {
+	File     string
+	Sections map[string]iniSection
+}
+
+// NewIniParser creates a new ini parser for a given Parser.
+func NewIniParser(p *Parser) *IniParser {
+	return &IniParser{
+		parser: p,
+	}
+}
+
+// IniParse is a convenience function to parse command line options with default
+// settings from an ini formatted file. The provided data is a pointer to a struct
+// representing the default option group (named "Application Options"). For
+// more control, use flags.NewParser.
+func IniParse(filename string, data interface{}) error {
+	p := NewParser(data, Default)
+
+	return NewIniParser(p).ParseFile(filename)
+}
+
+// ParseFile parses flags from an ini formatted file. See Parse for more
+// information on the ini file format. The returned errors can be of the type
+// flags.Error or flags.IniError.
+func (i *IniParser) ParseFile(filename string) error {
+	ini, err := readIniFromFile(filename)
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// Parse parses flags from an ini format. You can use ParseFile as a
+// convenience function to parse from a filename instead of a general
+// io.Reader.
+//
+// The format of the ini file is as follows:
+//
+//     [Option group name]
+//     option = value
+//
+// Each section in the ini file represents an option group or command in the
+// flags parser. The default flags parser option group (i.e. when using
+// flags.Parse) is named 'Application Options'. The ini option name is matched
+// in the following order:
+//
+//     1. Compared to the ini-name tag on the option struct field (if present)
+//     2. Compared to the struct field name
+//     3. Compared to the option long name (if present)
+//     4. Compared to the option short name (if present)
+//
+// Sections for nested groups and commands can be addressed using a dot `.'
+// namespacing notation (i.e. [subcommand.Options]). Group section names are
+// matched case insensitively.
+//
+// The returned errors can be of the type flags.Error or flags.IniError.
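+//
+// For example, values for the default "Application Options" group could be
+// provided as follows (the option names here are purely illustrative):
+//
+//     [Application Options]
+//     verbose = true
+//     output-file = /tmp/out.txt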
+func (i *IniParser) Parse(reader io.Reader) error {
+	ini, err := readIni(reader, "")
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// WriteFile writes the flags as ini format into a file. See Write
+// for more information. The returned error occurs when the specified file
+// could not be opened for writing.
+func (i *IniParser) WriteFile(filename string, options IniOptions) error {
+	return writeIniToFile(i, filename, options)
+}
+
+// Write writes the current values of all the flags to an ini format.
+// See Parse for more information on the ini file format. You typically
+// call this only after settings have been parsed since the default values of each
+// option are stored just before parsing the flags (this is only relevant when
+// IniIncludeDefaults is _not_ set in options).
+func (i *IniParser) Write(writer io.Writer, options IniOptions) {
+	writeIni(i, writer, options)
+}
+
+func readFullLine(reader *bufio.Reader) (string, error) {
+	var line []byte
+
+	for {
+		l, more, err := reader.ReadLine()
+
+		if err != nil {
+			return "", err
+		}
+
+		if line == nil && !more {
+			return string(l), nil
+		}
+
+		line = append(line, l...)
+
+		if !more {
+			break
+		}
+	}
+
+	return string(line), nil
+}
+
+func optionIniName(option *Option) string {
+	name := option.tag.Get("_read-ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	name = option.tag.Get("ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	return option.field.Name
+}
+
+func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) {
+	var sname string
+
+	if len(namespace) != 0 {
+		sname = namespace
+	}
+
+	if cmd.Group != group && len(group.ShortDescription) != 0 {
+		if len(sname) != 0 {
+			sname += "."
+		}
+
+		sname += group.ShortDescription
+	}
+
+	sectionwritten := false
+	comments := (options & IniIncludeComments) != IniNone
+
+	for _, option := range group.options {
+		if option.isFunc() || option.Hidden {
+			continue
+		}
+
+		if len(option.tag.Get("no-ini")) != 0 {
+			continue
+		}
+
+		val := option.value
+
+		if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() {
+			continue
+		}
+
+		if !sectionwritten {
+			fmt.Fprintf(writer, "[%s]\n", sname)
+			sectionwritten = true
+		}
+
+		if comments && len(option.Description) != 0 {
+			fmt.Fprintf(writer, "; %s\n", option.Description)
+		}
+
+		oname := optionIniName(option)
+
+		commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault()
+
+		kind := val.Type().Kind()
+		switch kind {
+		case reflect.Slice:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				for idx := 0; idx < val.Len(); idx++ {
+					v, _ := convertToString(val.Index(idx), option.tag)
+
+					writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+				}
+			}
+		case reflect.Map:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				mkeys := val.MapKeys()
+				keys := make([]string, len(val.MapKeys()))
+				kkmap := make(map[string]reflect.Value)
+
+				for i, k := range mkeys {
+					keys[i], _ = convertToString(k, option.tag)
+					kkmap[keys[i]] = k
+				}
+
+				sort.Strings(keys)
+
+				for _, k := range keys {
+					v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag)
+
+					writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote)
+				}
+			}
+		default:
+			v, _ := convertToString(val, option.tag)
+
+			writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+		}
+
+		if comments {
+			fmt.Fprintln(writer)
+		}
+	}
+
+	if sectionwritten && !comments {
+		fmt.Fprintln(writer)
+	}
+}
+
+func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) {
+	if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) {
+		optionValue = strconv.Quote(optionValue)
+	}
+
+	comment := ""
+	if commentOption {
+		comment = "; "
+	}
+
+	fmt.Fprintf(writer, "%s%s =", comment, optionName)
+
+	if optionKey != "" {
+		fmt.Fprintf(writer, " %s:%s", optionKey, optionValue)
+	} else if optionValue != "" {
+		fmt.Fprintf(writer, " %s", optionValue)
+	}
+
+	fmt.Fprintln(writer)
+}
+
+func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) {
+	command.eachGroup(func(group *Group) {
+		if !group.Hidden {
+			writeGroupIni(command, group, namespace, writer, options)
+		}
+	})
+
+	for _, c := range command.commands {
+		var nns string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(namespace) != 0 {
+			nns = c.Name + "." + nns
+		} else {
+			nns = c.Name
+		}
+
+		writeCommandIni(c, nns, writer, options)
+	}
+}
+
+func writeIni(parser *IniParser, writer io.Writer, options IniOptions) {
+	writeCommandIni(parser.parser.Command, "", writer, options)
+}
+
+func writeIniToFile(parser *IniParser, filename string, options IniOptions) error {
+	file, err := os.Create(filename)
+
+	if err != nil {
+		return err
+	}
+
+	defer file.Close()
+
+	writeIni(parser, file, options)
+
+	return nil
+}
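+
+// Editor's illustrative sketch, not part of upstream go-flags: dumping the
+// current option values of a parser into an INI file, keeping default values
+// and emitting each option description as a comment. The file name used here
+// is hypothetical.
+func exampleWriteIniSketch(p *IniParser) error {
+	return writeIniToFile(p, "example.ini", IniIncludeDefaults|IniIncludeComments)
+}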
+
+func readIniFromFile(filename string) (*ini, error) {
+	file, err := os.Open(filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer file.Close()
+
+	return readIni(file, filename)
+}
+
+func readIni(contents io.Reader, filename string) (*ini, error) {
+	ret := &ini{
+		File:     filename,
+		Sections: make(map[string]iniSection),
+	}
+
+	reader := bufio.NewReader(contents)
+
+	// Empty global section
+	section := make(iniSection, 0, 10)
+	sectionname := ""
+
+	ret.Sections[sectionname] = section
+
+	var lineno uint
+
+	for {
+		line, err := readFullLine(reader)
+
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+
+		lineno++
+		line = strings.TrimSpace(line)
+
+		// Skip empty lines and comment lines starting with ';' or '#'
+		if len(line) == 0 || line[0] == ';' || line[0] == '#' {
+			continue
+		}
+
+		if line[0] == '[' {
+			if line[len(line)-1] != ']' {
+				return nil, &IniError{
+					Message:    "malformed section header",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			name := strings.TrimSpace(line[1 : len(line)-1])
+
+			if len(name) == 0 {
+				return nil, &IniError{
+					Message:    "empty section name",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			sectionname = name
+			section = ret.Sections[name]
+
+			if section == nil {
+				section = make(iniSection, 0, 10)
+				ret.Sections[name] = section
+			}
+
+			continue
+		}
+
+		// Parse option here
+		keyval := strings.SplitN(line, "=", 2)
+
+		if len(keyval) != 2 {
+			return nil, &IniError{
+				Message:    fmt.Sprintf("malformed key=value (%s)", line),
+				File:       filename,
+				LineNumber: lineno,
+			}
+		}
+
+		name := strings.TrimSpace(keyval[0])
+		value := strings.TrimSpace(keyval[1])
+		quoted := false
+
+		if len(value) != 0 && value[0] == '"' {
+			if v, err := strconv.Unquote(value); err == nil {
+				value = v
+
+				quoted = true
+			} else {
+				return nil, &IniError{
+					Message:    err.Error(),
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+		}
+
+		section = append(section, iniValue{
+			Name:       name,
+			Value:      value,
+			Quoted:     quoted,
+			LineNumber: lineno,
+		})
+
+		ret.Sections[sectionname] = section
+	}
+
+	return ret, nil
+}
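+
+// Editor's illustrative sketch, not part of upstream go-flags: the INI shape
+// accepted by readIni above -- ';' or '#' comment lines, optional [section]
+// headers and key=value pairs, where values may be quoted using Go string
+// syntax. The sample content and file name are hypothetical.
+func exampleReadIniSketch() (*ini, error) {
+	const sample = "; a global option\n" +
+		"verbose = true\n" +
+		"\n" +
+		"[Application Options]\n" +
+		"name = \"hello world\"\n"
+
+	return readIni(strings.NewReader(sample), "sample.ini")
+}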
+
+func (i *IniParser) matchingGroups(name string) []*Group {
+	if len(name) == 0 {
+		var ret []*Group
+
+		i.parser.eachGroup(func(g *Group) {
+			ret = append(ret, g)
+		})
+
+		return ret
+	}
+
+	g := i.parser.groupByName(name)
+
+	if g != nil {
+		return []*Group{g}
+	}
+
+	return nil
+}
+
+func (i *IniParser) parse(ini *ini) error {
+	p := i.parser
+
+	var quotesLookup = make(map[*Option]bool)
+
+	for name, section := range ini.Sections {
+		groups := i.matchingGroups(name)
+
+		if len(groups) == 0 {
+			return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+		}
+
+		for _, inival := range section {
+			var opt *Option
+
+			for _, group := range groups {
+				opt = group.optionByName(inival.Name, func(o *Option, n string) bool {
+					return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n)
+				})
+
+				if opt != nil && len(opt.tag.Get("no-ini")) != 0 {
+					opt = nil
+				}
+
+				if opt != nil {
+					break
+				}
+			}
+
+			if opt == nil {
+				if (p.Options & IgnoreUnknown) == None {
+					return &IniError{
+						Message:    fmt.Sprintf("unknown option: %s", inival.Name),
+						File:       ini.File,
+						LineNumber: inival.LineNumber,
+					}
+				}
+
+				continue
+			}
+
+			// When parsing the INI as defaults, skip options whose value was
+			// already set from something other than a default (e.g. the command line)
+			if i.ParseAsDefaults && !opt.isSetDefault {
+				continue
+			}
+
+			pval := &inival.Value
+
+			if !opt.canArgument() && len(inival.Value) == 0 {
+				pval = nil
+			} else {
+				if opt.value.Type().Kind() == reflect.Map {
+					parts := strings.SplitN(inival.Value, ":", 2)
+
+					// only handle unquoting
+					if len(parts) == 2 && len(parts[1]) > 0 && parts[1][0] == '"' {
+						if v, err := strconv.Unquote(parts[1]); err == nil {
+							parts[1] = v
+
+							inival.Quoted = true
+						} else {
+							return &IniError{
+								Message:    err.Error(),
+								File:       ini.File,
+								LineNumber: inival.LineNumber,
+							}
+						}
+
+						s := parts[0] + ":" + parts[1]
+
+						pval = &s
+					}
+				}
+			}
+
+			if err := opt.set(pval); err != nil {
+				return &IniError{
+					Message:    err.Error(),
+					File:       ini.File,
+					LineNumber: inival.LineNumber,
+				}
+			}
+
+			// either all INI values are quoted, or only the values that need quoting are
+			if _, ok := quotesLookup[opt]; !inival.Quoted || !ok {
+				quotesLookup[opt] = inival.Quoted
+			}
+
+			opt.tag.Set("_read-ini-name", inival.Name)
+		}
+	}
+
+	for opt, quoted := range quotesLookup {
+		opt.iniQuote = quoted
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go
new file mode 100644
index 0000000..0cb114e
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/man.go
@@ -0,0 +1,205 @@
+package flags
+
+import (
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"time"
+)
+
+func manQuote(s string) string {
+	return strings.Replace(s, "\\", "\\\\", -1)
+}
+
+func formatForMan(wr io.Writer, s string) {
+	for {
+		idx := strings.IndexRune(s, '`')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", manQuote(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "%s", manQuote(s[:idx]))
+
+		s = s[idx+1:]
+		idx = strings.IndexRune(s, '\'')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", manQuote(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "\\fB%s\\fP", manQuote(s[:idx]))
+		s = s[idx+1:]
+	}
+}
+
+func writeManPageOptions(wr io.Writer, grp *Group) {
+	grp.eachGroup(func(group *Group) {
+		if group.Hidden || len(group.options) == 0 {
+			return
+		}
+
+		// If the parent (grp) has any subgroups, display their descriptions as
+		// subsection headers similar to the output of --help.
+		if group.ShortDescription != "" && len(grp.groups) > 0 {
+			fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription)
+
+			if group.LongDescription != "" {
+				formatForMan(wr, group.LongDescription)
+				fmt.Fprintln(wr, "")
+			}
+		}
+
+		for _, opt := range group.options {
+			if !opt.canCli() || opt.Hidden {
+				continue
+			}
+
+			fmt.Fprintln(wr, ".TP")
+			fmt.Fprintf(wr, "\\fB")
+
+			if opt.ShortName != 0 {
+				fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName)
+			}
+
+			if len(opt.LongName) != 0 {
+				if opt.ShortName != 0 {
+					fmt.Fprintf(wr, ", ")
+				}
+
+				fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace()))
+			}
+
+			if len(opt.ValueName) != 0 || opt.OptionalArgument {
+				if opt.OptionalArgument {
+					fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", ")))
+				} else {
+					fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName))
+				}
+			}
+
+			if len(opt.Default) != 0 {
+				fmt.Fprintf(wr, " <default: \\fI%s\\fR>", manQuote(strings.Join(quoteV(opt.Default), ", ")))
+			} else if len(opt.EnvDefaultKey) != 0 {
+				if runtime.GOOS == "windows" {
+					fmt.Fprintf(wr, " <default: \\fI%%%s%%\\fR>", manQuote(opt.EnvDefaultKey))
+				} else {
+					fmt.Fprintf(wr, " <default: \\fI$%s\\fR>", manQuote(opt.EnvDefaultKey))
+				}
+			}
+
+			if opt.Required {
+				fmt.Fprintf(wr, " (\\fIrequired\\fR)")
+			}
+
+			fmt.Fprintln(wr, "\\fP")
+
+			if len(opt.Description) != 0 {
+				formatForMan(wr, opt.Description)
+				fmt.Fprintln(wr, "")
+			}
+		}
+	})
+}
+
+func writeManPageSubcommands(wr io.Writer, name string, root *Command) {
+	commands := root.sortedVisibleCommands()
+
+	for _, c := range commands {
+		var nn string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(name) != 0 {
+			nn = name + " " + c.Name
+		} else {
+			nn = c.Name
+		}
+
+		writeManPageCommand(wr, nn, root, c)
+	}
+}
+
+func writeManPageCommand(wr io.Writer, name string, root *Command, command *Command) {
+	fmt.Fprintf(wr, ".SS %s\n", name)
+	fmt.Fprintln(wr, command.ShortDescription)
+
+	if len(command.LongDescription) > 0 {
+		fmt.Fprintln(wr, "")
+
+		cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name))
+
+		if strings.HasPrefix(command.LongDescription, cmdstart) {
+			fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name))
+
+			formatForMan(wr, command.LongDescription[len(cmdstart):])
+			fmt.Fprintln(wr, "")
+		} else {
+			formatForMan(wr, command.LongDescription)
+			fmt.Fprintln(wr, "")
+		}
+	}
+
+	var usage string
+	if us, ok := command.data.(Usage); ok {
+		usage = us.Usage()
+	} else if command.hasCliOptions() {
+		usage = fmt.Sprintf("[%s-OPTIONS]", command.Name)
+	}
+
+	var pre string
+	if root.hasCliOptions() {
+		pre = fmt.Sprintf("%s [OPTIONS] %s", root.Name, command.Name)
+	} else {
+		pre = fmt.Sprintf("%s %s", root.Name, command.Name)
+	}
+
+	if len(usage) > 0 {
+		fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage))
+	}
+
+	if len(command.Aliases) > 0 {
+		fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", ")))
+	}
+
+	writeManPageOptions(wr, command.Group)
+	writeManPageSubcommands(wr, name, command)
+}
+
+// WriteManPage writes a basic man page in groff format to the specified
+// writer.
+func (p *Parser) WriteManPage(wr io.Writer) {
+	t := time.Now()
+
+	fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006"))
+	fmt.Fprintln(wr, ".SH NAME")
+	fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuote(p.ShortDescription))
+	fmt.Fprintln(wr, ".SH SYNOPSIS")
+
+	usage := p.Usage
+
+	if len(usage) == 0 {
+		usage = "[OPTIONS]"
+	}
+
+	fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage))
+	fmt.Fprintln(wr, ".SH DESCRIPTION")
+
+	formatForMan(wr, p.LongDescription)
+	fmt.Fprintln(wr, "")
+
+	fmt.Fprintln(wr, ".SH OPTIONS")
+
+	writeManPageOptions(wr, p.Command.Group)
+
+	if len(p.visibleCommands()) > 0 {
+		fmt.Fprintln(wr, ".SH COMMANDS")
+
+		writeManPageSubcommands(wr, "", p.Command)
+	}
+}
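+
+// Editor's illustrative sketch, not part of upstream go-flags: generating a
+// man page for a parser. The option struct and descriptions are hypothetical.
+func exampleWriteManPageSketch(wr io.Writer) {
+	var opts struct {
+		Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+	}
+
+	p := NewParser(&opts, Default)
+	p.ShortDescription = "example tool"
+	p.LongDescription = "The example tool demonstrates man page generation."
+
+	p.WriteManPage(wr)
+}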
diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go
new file mode 100644
index 0000000..96bb1a3
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/multitag.go
@@ -0,0 +1,140 @@
+package flags
+
+import (
+	"strconv"
+)
+
+type multiTag struct {
+	value string
+	cache map[string][]string
+}
+
+func newMultiTag(v string) multiTag {
+	return multiTag{
+		value: v,
+	}
+}
+
+func (x *multiTag) scan() (map[string][]string, error) {
+	v := x.value
+
+	ret := make(map[string][]string)
+
+	// This is mostly copied from reflect.StructTag.Get
+	for v != "" {
+		i := 0
+
+		// Skip whitespace
+		for i < len(v) && v[i] == ' ' {
+			i++
+		}
+
+		v = v[i:]
+
+		if v == "" {
+			break
+		}
+
+		// Scan to colon to find key
+		i = 0
+
+		for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' {
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value)
+		}
+
+		if v[i] != ':' {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value)
+		}
+
+		if i+1 >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value)
+		}
+
+		if v[i+1] != '"' {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value)
+		}
+
+		name := v[:i]
+		v = v[i+1:]
+
+		// Scan quoted string to find value
+		i = 1
+
+		for i < len(v) && v[i] != '"' {
+			if v[i] == '\n' {
+				return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value)
+			}
+
+			if v[i] == '\\' {
+				i++
+			}
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value)
+		}
+
+		val, err := strconv.Unquote(v[:i+1])
+
+		if err != nil {
+			return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value)
+		}
+
+		v = v[i+1:]
+
+		ret[name] = append(ret[name], val)
+	}
+
+	return ret, nil
+}
+
+func (x *multiTag) Parse() error {
+	vals, err := x.scan()
+	x.cache = vals
+
+	return err
+}
+
+func (x *multiTag) cached() map[string][]string {
+	if x.cache == nil {
+		cache, _ := x.scan()
+
+		if cache == nil {
+			cache = make(map[string][]string)
+		}
+
+		x.cache = cache
+	}
+
+	return x.cache
+}
+
+func (x *multiTag) Get(key string) string {
+	c := x.cached()
+
+	if v, ok := c[key]; ok {
+		return v[len(v)-1]
+	}
+
+	return ""
+}
+
+func (x *multiTag) GetMany(key string) []string {
+	c := x.cached()
+	return c[key]
+}
+
+func (x *multiTag) Set(key string, value string) {
+	c := x.cached()
+	c[key] = []string{value}
+}
+
+func (x *multiTag) SetMany(key string, value []string) {
+	c := x.cached()
+	c[key] = value
+}
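+
+// Editor's illustrative sketch, not part of upstream go-flags: multiTag parses
+// struct-tag style strings and, unlike reflect.StructTag, keeps every value
+// recorded for a key (Get returns the last value, GetMany returns all of them).
+func exampleMultiTagSketch() (string, error) {
+	tag := newMultiTag(`short:"v" long:"verbose" description:"Verbose output"`)
+
+	if err := tag.Parse(); err != nil {
+		return "", err
+	}
+
+	return tag.Get("long"), nil
+}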
diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go
new file mode 100644
index 0000000..5f85250
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/option.go
@@ -0,0 +1,459 @@
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// Option flag information. Contains a description of the option, short and
+// long name as well as a default value and whether an argument for this
+// flag is optional.
+type Option struct {
+	// The description of the option flag. This description is shown
+	// automatically in the built-in help.
+	Description string
+
+	// The short name of the option (a single character). If not 0, the
+	// option flag can be 'activated' using -<ShortName>. Either ShortName
+	// or LongName needs to be non-empty.
+	ShortName rune
+
+	// The long name of the option. If not "", the option flag can be
+	// activated using --<LongName>. Either ShortName or LongName needs
+	// to be non-empty.
+	LongName string
+
+	// The default value of the option.
+	Default []string
+
+	// The optional environment default value key name.
+	EnvDefaultKey string
+
+	// The optional delimiter string for EnvDefaultKey values.
+	EnvDefaultDelim string
+
+	// If true, specifies that the argument to an option flag is optional.
+	// When no argument to the flag is specified on the command line, the
+	// value of OptionalValue will be set in the field this option represents.
+	// This is only valid for non-boolean options.
+	OptionalArgument bool
+
+	// The optional value of the option. The optional value is used when
+	// the option flag is marked as having an OptionalArgument. This means
+	// that when the flag is specified, but no option argument is given,
+	// the value of the field this option represents will be set to
+	// OptionalValue. This is only valid for non-boolean options.
+	OptionalValue []string
+
+	// If true, the option _must_ be specified on the command line. If the
+	// option is not specified, the parser will generate an ErrRequired type
+	// error.
+	Required bool
+
+	// A name for the value of an option shown in the Help as --flag [ValueName]
+	ValueName string
+
+	// A mask value to show in the help instead of the default value. This
+	// is useful for hiding sensitive information in the help, such as
+	// passwords.
+	DefaultMask string
+
+	// If non-empty, only a certain set of values is allowed for the option.
+	Choices []string
+
+	// If true, the option is not displayed in the help or man page
+	Hidden bool
+
+	// The group which the option belongs to
+	group *Group
+
+	// The struct field which the option represents.
+	field reflect.StructField
+
+	// The struct field value which the option represents.
+	value reflect.Value
+
+	// Determines if the option will be always quoted in the INI output
+	iniQuote bool
+
+	tag            multiTag
+	isSet          bool
+	isSetDefault   bool
+	preventDefault bool
+
+	defaultLiteral string
+}
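+
+// Editor's illustrative sketch, not part of upstream go-flags: Option values
+// are normally populated from struct tags on an options struct; the fields
+// and tag values below are hypothetical.
+type exampleOptionsSketch struct {
+	Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+	Name    string `short:"n" long:"name" description:"Name to greet" default:"world"`
+}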
+
+// LongNameWithNamespace returns the option's long name with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the long name
+// itself are separated by the parser's namespace delimiter. If the long name is
+// empty an empty string is returned.
+func (option *Option) LongNameWithNamespace() string {
+	if len(option.LongName) == 0 {
+		return ""
+	}
+
+	// fetch the namespace delimiter from the parser which is always at the
+	// end of the group hierarchy
+	namespaceDelimiter := ""
+	g := option.group
+
+	for {
+		if p, ok := g.parent.(*Parser); ok {
+			namespaceDelimiter = p.NamespaceDelimiter
+
+			break
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		}
+	}
+
+	// concatenate long name with namespace
+	longName := option.LongName
+	g = option.group
+
+	for g != nil {
+		if g.Namespace != "" {
+			longName = g.Namespace + namespaceDelimiter + longName
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		case *Parser:
+			g = nil
+		}
+	}
+
+	return longName
+}
+
+// String converts an option to a human-readable string describing the option.
+func (option *Option) String() string {
+	var s string
+	var short string
+
+	if option.ShortName != 0 {
+		data := make([]byte, utf8.RuneLen(option.ShortName))
+		utf8.EncodeRune(data, option.ShortName)
+		short = string(data)
+
+		if len(option.LongName) != 0 {
+			s = fmt.Sprintf("%s%s, %s%s",
+				string(defaultShortOptDelimiter), short,
+				defaultLongOptDelimiter, option.LongNameWithNamespace())
+		} else {
+			s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short)
+		}
+	} else if len(option.LongName) != 0 {
+		s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace())
+	}
+
+	return s
+}
+
+// Value returns the option value as an interface{}.
+func (option *Option) Value() interface{} {
+	return option.value.Interface()
+}
+
+// Field returns the reflect struct field of the option.
+func (option *Option) Field() reflect.StructField {
+	return option.field
+}
+
+// IsSet returns true if option has been set
+func (option *Option) IsSet() bool {
+	return option.isSet
+}
+
+// IsSetDefault returns true if option has been set via the default option tag
+func (option *Option) IsSetDefault() bool {
+	return option.isSetDefault
+}
+
+// Set the value of an option to the specified value. An error will be returned
+// if the specified value could not be converted to the corresponding option
+// value type.
+func (option *Option) set(value *string) error {
+	kind := option.value.Type().Kind()
+
+	if (kind == reflect.Map || kind == reflect.Slice) && !option.isSet {
+		option.empty()
+	}
+
+	option.isSet = true
+	option.preventDefault = true
+
+	if len(option.Choices) != 0 {
+		found := false
+
+		for _, choice := range option.Choices {
+			if choice == *value {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ")
+
+			if len(option.Choices) > 1 {
+				allowed += " or " + option.Choices[len(option.Choices)-1]
+			}
+
+			return newErrorf(ErrInvalidChoice,
+				"Invalid value `%s' for option `%s'. Allowed values are: %s",
+				*value, option, allowed)
+		}
+	}
+
+	if option.isFunc() {
+		return option.call(value)
+	} else if value != nil {
+		return convert(*value, option.value, option.tag)
+	}
+
+	return convert("", option.value, option.tag)
+}
+
+func (option *Option) canCli() bool {
+	return option.ShortName != 0 || len(option.LongName) != 0
+}
+
+func (option *Option) canArgument() bool {
+	if u := option.isUnmarshaler(); u != nil {
+		return true
+	}
+
+	return !option.isBool()
+}
+
+func (option *Option) emptyValue() reflect.Value {
+	tp := option.value.Type()
+
+	if tp.Kind() == reflect.Map {
+		return reflect.MakeMap(tp)
+	}
+
+	return reflect.Zero(tp)
+}
+
+func (option *Option) empty() {
+	if !option.isFunc() {
+		option.value.Set(option.emptyValue())
+	}
+}
+
+func (option *Option) clearDefault() {
+	usedDefault := option.Default
+
+	if envKey := option.EnvDefaultKey; envKey != "" {
+		if value, ok := os.LookupEnv(envKey); ok {
+			if option.EnvDefaultDelim != "" {
+				usedDefault = strings.Split(value,
+					option.EnvDefaultDelim)
+			} else {
+				usedDefault = []string{value}
+			}
+		}
+	}
+
+	option.isSetDefault = true
+
+	if len(usedDefault) > 0 {
+		option.empty()
+
+		for _, d := range usedDefault {
+			option.set(&d)
+			option.isSetDefault = true
+		}
+	} else {
+		tp := option.value.Type()
+
+		switch tp.Kind() {
+		case reflect.Map:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		case reflect.Slice:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		}
+	}
+}
+
+func (option *Option) valueIsDefault() bool {
+	// Check if the value of the option corresponds to its
+	// default value
+	emptyval := option.emptyValue()
+
+	checkvalptr := reflect.New(emptyval.Type())
+	checkval := reflect.Indirect(checkvalptr)
+
+	checkval.Set(emptyval)
+
+	if len(option.Default) != 0 {
+		for _, v := range option.Default {
+			convert(v, checkval, option.tag)
+		}
+	}
+
+	return reflect.DeepEqual(option.value.Interface(), checkval.Interface())
+}
+
+func (option *Option) isUnmarshaler() Unmarshaler {
+	v := option.value
+
+	for {
+		if !v.CanInterface() {
+			break
+		}
+
+		i := v.Interface()
+
+		if u, ok := i.(Unmarshaler); ok {
+			return u
+		}
+
+		if !v.CanAddr() {
+			break
+		}
+
+		v = v.Addr()
+	}
+
+	return nil
+}
+
+func (option *Option) isBool() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Bool:
+			return true
+		case reflect.Func:
+			return tp.NumIn() == 0
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isSignedNumber() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64:
+			return true
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isFunc() bool {
+	return option.value.Type().Kind() == reflect.Func
+}
+
+func (option *Option) call(value *string) error {
+	var retval []reflect.Value
+
+	if value == nil {
+		retval = option.value.Call(nil)
+	} else {
+		tp := option.value.Type().In(0)
+
+		val := reflect.New(tp)
+		val = reflect.Indirect(val)
+
+		if err := convert(*value, val, option.tag); err != nil {
+			return err
+		}
+
+		retval = option.value.Call([]reflect.Value{val})
+	}
+
+	if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() {
+		if retval[0].Interface() == nil {
+			return nil
+		}
+
+		return retval[0].Interface().(error)
+	}
+
+	return nil
+}
+
+func (option *Option) updateDefaultLiteral() {
+	defs := option.Default
+	def := ""
+
+	if len(defs) == 0 && option.canArgument() {
+		var showdef bool
+
+		switch option.field.Type.Kind() {
+		case reflect.Func, reflect.Ptr:
+			showdef = !option.value.IsNil()
+		case reflect.Slice, reflect.String, reflect.Array:
+			showdef = option.value.Len() > 0
+		case reflect.Map:
+			showdef = !option.value.IsNil() && option.value.Len() > 0
+		default:
+			zeroval := reflect.Zero(option.field.Type)
+			showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface())
+		}
+
+		if showdef {
+			def, _ = convertToString(option.value, option.tag)
+		}
+	} else if len(defs) != 0 {
+		l := len(defs) - 1
+
+		for i := 0; i < l; i++ {
+			def += quoteIfNeeded(defs[i]) + ", "
+		}
+
+		def += quoteIfNeeded(defs[l])
+	}
+
+	option.defaultLiteral = def
+}
+
+func (option *Option) shortAndLongName() string {
+	ret := &bytes.Buffer{}
+
+	if option.ShortName != 0 {
+		ret.WriteRune(defaultShortOptDelimiter)
+		ret.WriteRune(option.ShortName)
+	}
+
+	if len(option.LongName) != 0 {
+		if option.ShortName != 0 {
+			ret.WriteRune('/')
+		}
+
+		ret.WriteString(option.LongName)
+	}
+
+	return ret.String()
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
new file mode 100644
index 0000000..56dfdae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
@@ -0,0 +1,67 @@
+// +build !windows forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+const (
+	defaultShortOptDelimiter = '-'
+	defaultLongOptDelimiter  = "--"
+	defaultNameArgDelimiter  = '='
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && arg[0] == '-'
+}
+
+func argumentIsOption(arg string) bool {
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option without the prefix and whether or
+// not the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	if strings.HasPrefix(optname, "--") {
+		return "--", optname[2:], true
+	} else if strings.HasPrefix(optname, "-") {
+		return "-", optname[1:], false
+	}
+
+	return "", optname, false
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	pos := strings.Index(option, "=")
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], "=", &rest
+	}
+
+	return option, "", nil
+}
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	var help struct {
+		ShowHelp func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelp = showHelp
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
new file mode 100644
index 0000000..f3f28ae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
@@ -0,0 +1,108 @@
+// +build !forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+// Windows uses a forward slash for both short and long options. It also uses
+// a colon as the name/argument delimiter.
+const (
+	defaultShortOptDelimiter = '/'
+	defaultLongOptDelimiter  = "/"
+	defaultNameArgDelimiter  = ':'
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/')
+}
+
+func argumentIsOption(arg string) bool {
+	// Windows-style options allow a forward slash as the option
+	// delimiter.
+	if len(arg) > 1 && arg[0] == '/' {
+		return true
+	}
+
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option without the prefix and whether or
+// not the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	// Determine if the argument is a long option or not.  Windows
+	// typically supports both long and short options with a single
+	// forward slash as the option delimiter, so handle this situation
+	// nicely.
+	possplit := 0
+
+	if strings.HasPrefix(optname, "--") {
+		possplit = 2
+		islong = true
+	} else if strings.HasPrefix(optname, "-") {
+		possplit = 1
+		islong = false
+	} else if strings.HasPrefix(optname, "/") {
+		possplit = 1
+		islong = len(optname) > 2
+	}
+
+	return optname[:possplit], optname[possplit:], islong
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	if len(option) == 0 {
+		return option, "", nil
+	}
+
+	// Windows typically uses a colon for the option name and argument
+	// delimiter while POSIX typically uses an equals sign. Support both styles,
+	// but don't allow the two to be mixed.  That is to say /foo:bar and
+	// --foo=bar are acceptable, but /foo=bar and --foo:bar are not.
+	var pos int
+	var sp string
+
+	if prefix == "/" {
+		sp = ":"
+		pos = strings.Index(option, sp)
+	} else if len(prefix) > 0 {
+		sp = "="
+		pos = strings.Index(option, sp)
+	}
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], sp, &rest
+	}
+
+	return option, "", nil
+}
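+
+// Editor's illustrative sketch, not part of upstream go-flags: on Windows both
+// "/name:value" and "--name=value" split into a name and a value, but the two
+// delimiter styles are never mixed.
+func exampleSplitOptionSketch() (string, string, *string) {
+	prefix, name, islong := stripOptionPrefix("/verbose:true")
+
+	// Yields "verbose", ":" and a pointer to "true".
+	return splitOption(prefix, name, islong)
+}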
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	// Windows CLI applications typically use /? for help, so make that
+	// available as well as the POSIX-style -h and --help.
+	var help struct {
+		ShowHelpWindows func() error `short:"?" description:"Show this help message"`
+		ShowHelpPosix   func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelpWindows = showHelp
+	help.ShowHelpPosix = showHelp
+
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go
new file mode 100644
index 0000000..0a7922a
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/parser.go
@@ -0,0 +1,700 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// A Parser provides command line option parsing. It can contain several
+// option groups each with their own set of options.
+type Parser struct {
+	// Embedded, see Command for more information
+	*Command
+
+	// A usage string to be displayed in the help message.
+	Usage string
+
+	// Option flags changing the behavior of the parser.
+	Options Options
+
+	// NamespaceDelimiter separates group namespaces and option long names
+	NamespaceDelimiter string
+
+	// UnknownOptionHandler is a function which gets called when the parser
+	// encounters an unknown option. The function receives the unknown option
+	// name, a SplitArgument which specifies its value if set with an argument
+	// separator, and the remaining command line arguments.
+	// It should return a new list of remaining arguments to continue parsing,
+	// or an error to indicate a parse failure.
+	UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error)
+
+	// CompletionHandler is a function that gets called to handle the completion of
+	// items. By default, the items are printed and the application is exited.
+	// You can override this default behavior by specifying a custom CompletionHandler.
+	CompletionHandler func(items []Completion)
+
+	// CommandHandler is a function that gets called to handle execution of a
+	// command. By default, the command will simply be executed. This can be
+	// overridden to perform certain actions (such as applying global flags)
+	// just before the command is executed. Note that if you override the
+	// handler it is your responsibility to call the command.Execute function.
+	//
+	// The command passed into CommandHandler may be nil in case there is no
+	// command to be executed when parsing has finished.
+	CommandHandler func(command Commander, args []string) error
+
+	internalError error
+}
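+
+// Editor's illustrative sketch, not part of upstream go-flags: an
+// UnknownOptionHandler that collects unknown flag names and lets parsing
+// continue with the remaining arguments untouched.
+func exampleUnknownOptionHandlerSketch(p *Parser) *[]string {
+	unknown := &[]string{}
+
+	p.UnknownOptionHandler = func(option string, arg SplitArgument, args []string) ([]string, error) {
+		*unknown = append(*unknown, option)
+		return args, nil
+	}
+
+	return unknown
+}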
+
+// SplitArgument represents the argument value of an option that was passed using
+// an argument separator.
+type SplitArgument interface {
+	// Value returns the option's value as a string, and a boolean indicating
+	// if the option was present.
+	Value() (string, bool)
+}
+
+type strArgument struct {
+	value *string
+}
+
+func (s strArgument) Value() (string, bool) {
+	if s.value == nil {
+		return "", false
+	}
+
+	return *s.value, true
+}
+
+// Options provides parser options that change the behavior of the option
+// parser.
+type Options uint
+
+const (
+	// None indicates no options.
+	None Options = 0
+
+	// HelpFlag adds a default Help Options group to the parser containing
+	// -h and --help options. When either -h or --help is specified on the
+	// command line, the parser will return the special error of type
+	// ErrHelp. When PrintErrors is also specified, then the help message
+	// will also be automatically printed to os.Stdout.
+	HelpFlag = 1 << iota
+
+	// PassDoubleDash passes all arguments after a double dash, --, as
+	// remaining command line arguments (i.e. they will not be parsed for
+	// flags).
+	PassDoubleDash
+
+	// IgnoreUnknown ignores any unknown options and passes them as
+	// remaining command line arguments instead of generating an error.
+	IgnoreUnknown
+
+	// PrintErrors prints any errors which occurred during parsing to
+	// os.Stderr. In the special case of ErrHelp, the message will be printed
+	// to os.Stdout.
+	PrintErrors
+
+	// PassAfterNonOption passes all arguments after the first non-option
+	// as remaining command line arguments. This is equivalent to strict
+	// POSIX processing.
+	PassAfterNonOption
+
+	// Default is a convenient default set of options which should cover
+	// most of the uses of the flags package.
+	Default = HelpFlag | PrintErrors | PassDoubleDash
+)
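+
+// Editor's illustrative sketch, not part of upstream go-flags: parser options
+// form a bit mask and can be combined, for example keeping the default help
+// and error printing behaviour while also ignoring unknown flags.
+var exampleParserOptionsSketch Options = Default | IgnoreUnknown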
+
+type parseState struct {
+	arg        string
+	args       []string
+	retargs    []string
+	positional []*Arg
+	err        error
+
+	command *Command
+	lookup  lookup
+}
+
+// Parse is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). For more control, use
+// flags.NewParser.
+func Parse(data interface{}) ([]string, error) {
+	return NewParser(data, Default).Parse()
+}
+
+// ParseArgs is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). The args argument is
+// the list of command line arguments to parse. If you just want to parse the
+// default program command line arguments (i.e. os.Args), then use flags.Parse
+// instead. For more control, use flags.NewParser.
+func ParseArgs(data interface{}, args []string) ([]string, error) {
+	return NewParser(data, Default).ParseArgs(args)
+}
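+
+// Editor's illustrative sketch, not part of upstream go-flags: typical use of
+// the convenience Parse function with a hypothetical options struct. The
+// remaining positional arguments are returned; -h/--help surfaces as ErrHelp.
+func exampleParseSketch() ([]string, error) {
+	var opts struct {
+		Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+	}
+
+	return Parse(&opts)
+}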
+
+// NewParser creates a new parser. It uses os.Args[0] as the application
+// name and then calls NewNamedParser (see NewNamedParser for more details).
+// The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"), or nil if the default
+// group should not be added. The options parameter specifies a set of options
+// for the parser.
+func NewParser(data interface{}, options Options) *Parser {
+	p := NewNamedParser(path.Base(os.Args[0]), options)
+
+	if data != nil {
+		g, err := p.AddGroup("Application Options", "", data)
+
+		if err == nil {
+			g.parent = p
+		}
+
+		p.internalError = err
+	}
+
+	return p
+}
+
+// NewNamedParser creates a new parser. The appname is used to display the
+// executable name in the built-in help message. Option groups and commands can
+// be added to this parser by using AddGroup and AddCommand.
+func NewNamedParser(appname string, options Options) *Parser {
+	p := &Parser{
+		Command:            newCommand(appname, "", "", nil),
+		Options:            options,
+		NamespaceDelimiter: ".",
+	}
+
+	p.Command.parent = p
+
+	return p
+}
+
+// Parse parses the command line arguments from os.Args using Parser.ParseArgs.
+// For more detailed information see ParseArgs.
+func (p *Parser) Parse() ([]string, error) {
+	return p.ParseArgs(os.Args[1:])
+}
+
+// ParseArgs parses the command line arguments according to the option groups that
+// were added to the parser. On successful parsing of the arguments, the
+// remaining, non-option, arguments (if any) are returned. The returned error
+// indicates a parsing error and can be used with PrintError to display
+// contextual information on where the error occurred exactly.
+//
+// When the built-in help group has been added (via the HelpFlag option) and
+// either -h or --help was specified in the command line arguments, a help
+// message will be
+// automatically printed if the PrintErrors option is enabled.
+// Furthermore, the special error type ErrHelp is returned.
+// It is up to the caller to exit the program if so desired.
+func (p *Parser) ParseArgs(args []string) ([]string, error) {
+	if p.internalError != nil {
+		return nil, p.internalError
+	}
+
+	p.eachOption(func(c *Command, g *Group, option *Option) {
+		option.isSet = false
+		option.isSetDefault = false
+		option.updateDefaultLiteral()
+	})
+
+	// Add built-in help group to all commands if necessary
+	if (p.Options & HelpFlag) != None {
+		p.addHelpGroups(p.showBuiltinHelp)
+	}
+
+	compval := os.Getenv("GO_FLAGS_COMPLETION")
+
+	if len(compval) != 0 {
+		comp := &completion{parser: p}
+		items := comp.complete(args)
+
+		if p.CompletionHandler != nil {
+			p.CompletionHandler(items)
+		} else {
+			comp.print(items, compval == "verbose")
+			os.Exit(0)
+		}
+
+		return nil, nil
+	}
+
+	s := &parseState{
+		args:    args,
+		retargs: make([]string, 0, len(args)),
+	}
+
+	p.fillParseState(s)
+
+	for !s.eof() {
+		arg := s.pop()
+
+		// When PassDoubleDash is set and we encounter a --, then
+		// simply append all the rest as arguments and break out
+		if (p.Options&PassDoubleDash) != None && arg == "--" {
+			s.addArgs(s.args...)
+			break
+		}
+
+		if !argumentIsOption(arg) {
+			// Note: this also sets s.err, so we can just check for
+			// nil here and use s.err later
+			if p.parseNonOption(s) != nil {
+				break
+			}
+
+			continue
+		}
+
+		var err error
+
+		prefix, optname, islong := stripOptionPrefix(arg)
+		optname, _, argument := splitOption(prefix, optname, islong)
+
+		if islong {
+			err = p.parseLong(s, optname, argument)
+		} else {
+			err = p.parseShort(s, optname, argument)
+		}
+
+		if err != nil {
+			ignoreUnknown := (p.Options & IgnoreUnknown) != None
+			parseErr := wrapError(err)
+
+			if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) {
+				s.err = parseErr
+				break
+			}
+
+			if ignoreUnknown {
+				s.addArgs(arg)
+			} else if p.UnknownOptionHandler != nil {
+				modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args)
+
+				if err != nil {
+					s.err = err
+					break
+				}
+
+				s.args = modifiedArgs
+			}
+		}
+	}
+
+	if s.err == nil {
+		p.eachOption(func(c *Command, g *Group, option *Option) {
+			if option.preventDefault {
+				return
+			}
+
+			option.clearDefault()
+		})
+
+		s.checkRequired(p)
+	}
+
+	var reterr error
+
+	if s.err != nil {
+		reterr = s.err
+	} else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional {
+		reterr = s.estimateCommand()
+	} else if cmd, ok := s.command.data.(Commander); ok {
+		if p.CommandHandler != nil {
+			reterr = p.CommandHandler(cmd, s.retargs)
+		} else {
+			reterr = cmd.Execute(s.retargs)
+		}
+	} else if p.CommandHandler != nil {
+		reterr = p.CommandHandler(nil, s.retargs)
+	}
+
+	if reterr != nil {
+		var retargs []string
+
+		if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp {
+			retargs = append([]string{s.arg}, s.args...)
+		} else {
+			retargs = s.args
+		}
+
+		return retargs, p.printError(reterr)
+	}
+
+	return s.retargs, nil
+}
+
+func (p *parseState) eof() bool {
+	return len(p.args) == 0
+}
+
+func (p *parseState) pop() string {
+	if p.eof() {
+		return ""
+	}
+
+	p.arg = p.args[0]
+	p.args = p.args[1:]
+
+	return p.arg
+}
+
+func (p *parseState) peek() string {
+	if p.eof() {
+		return ""
+	}
+
+	return p.args[0]
+}
+
+func (p *parseState) checkRequired(parser *Parser) error {
+	c := parser.Command
+
+	var required []*Option
+
+	for c != nil {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				if !option.isSet && option.Required {
+					required = append(required, option)
+				}
+			}
+		})
+
+		c = c.Active
+	}
+
+	if len(required) == 0 {
+		if len(p.positional) > 0 {
+			var reqnames []string
+
+			for _, arg := range p.positional {
+				argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1
+
+				if !argRequired {
+					continue
+				}
+
+				if arg.isRemaining() {
+					if arg.value.Len() < arg.Required {
+						var arguments string
+
+						if arg.Required > 1 {
+							arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len())
+						} else {
+							arguments = "argument"
+						}
+
+						reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`")
+					} else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum {
+						if arg.RequiredMaximum == 0 {
+							reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`")
+						} else {
+							var arguments string
+
+							if arg.RequiredMaximum > 1 {
+								arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len())
+							} else {
+								arguments = "argument"
+							}
+
+							reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`")
+						}
+					}
+				} else {
+					reqnames = append(reqnames, "`"+arg.Name+"`")
+				}
+			}
+
+			if len(reqnames) == 0 {
+				return nil
+			}
+
+			var msg string
+
+			if len(reqnames) == 1 {
+				msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
+			} else {
+				msg = fmt.Sprintf("the required arguments %s and %s were not provided",
+					strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
+			}
+
+			p.err = newError(ErrRequired, msg)
+			return p.err
+		}
+
+		return nil
+	}
+
+	names := make([]string, 0, len(required))
+
+	for _, k := range required {
+		names = append(names, "`"+k.String()+"'")
+	}
+
+	sort.Strings(names)
+
+	var msg string
+
+	if len(names) == 1 {
+		msg = fmt.Sprintf("the required flag %s was not specified", names[0])
+	} else {
+		msg = fmt.Sprintf("the required flags %s and %s were not specified",
+			strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
+	}
+
+	p.err = newError(ErrRequired, msg)
+	return p.err
+}
+
+func (p *parseState) estimateCommand() error {
+	commands := p.command.sortedVisibleCommands()
+	cmdnames := make([]string, len(commands))
+
+	for i, v := range commands {
+		cmdnames[i] = v.Name
+	}
+
+	var msg string
+	var errtype ErrorType
+
+	if len(p.retargs) != 0 {
+		c, l := closestChoice(p.retargs[0], cmdnames)
+		msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
+		errtype = ErrUnknownCommand
+
+		if float32(l)/float32(len(c)) < 0.5 {
+			msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
+		} else if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("%s. You should use the %s command",
+				msg,
+				cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
+				msg,
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	} else {
+		errtype = ErrCommandRequired
+
+		if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("Please specify one command of: %s or %s",
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	}
+
+	return newError(errtype, msg)
+}
+
+func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
+	if !option.canArgument() {
+		if argument != nil {
+			return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option)
+		}
+
+		err = option.set(nil)
+	} else if argument != nil || (canarg && !s.eof()) {
+		var arg string
+
+		if argument != nil {
+			arg = *argument
+		} else {
+			arg = s.pop()
+
+			if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') {
+				return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got option `%s'", option, arg)
+			} else if p.Options&PassDoubleDash != 0 && arg == "--" {
+				return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option)
+			}
+		}
+
+		if option.tag.Get("unquote") != "false" {
+			arg, err = unquoteIfPossible(arg)
+		}
+
+		if err == nil {
+			err = option.set(&arg)
+		}
+	} else if option.OptionalArgument {
+		option.empty()
+
+		for _, v := range option.OptionalValue {
+			err = option.set(&v)
+
+			if err != nil {
+				break
+			}
+		}
+	} else {
+		err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option)
+	}
+
+	if err != nil {
+		if _, ok := err.(*Error); !ok {
+			err = newErrorf(ErrMarshal, "invalid argument for flag `%s' (expected %s): %s",
+				option,
+				option.value.Type(),
+				err.Error())
+		}
+	}
+
+	return err
+}
+
+func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
+	if option := s.lookup.longNames[name]; option != nil {
+		// Only long options whose argument is not optional can consume
+		// an argument from the argument list
+		canarg := !option.OptionalArgument
+
+		return p.parseOption(s, name, option, canarg, argument)
+	}
+
+	return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name)
+}
+
+func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
+	c, n := utf8.DecodeRuneInString(optname)
+
+	if n == len(optname) {
+		return optname, nil
+	}
+
+	first := string(c)
+
+	if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
+		arg := optname[n:]
+		return first, &arg
+	}
+
+	return optname, nil
+}
+
+func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
+	if argument == nil {
+		optname, argument = p.splitShortConcatArg(s, optname)
+	}
+
+	for i, c := range optname {
+		shortname := string(c)
+
+		if option := s.lookup.shortNames[shortname]; option != nil {
+			// Only the last short option can consume an argument from
+			// the argument list, and only if its argument is not optional
+			canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument
+
+			if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
+				return err
+			}
+		} else {
+			return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname)
+		}
+
+		// Only the first option can have a concatenated argument, so just
+		// clear the argument here
+		argument = nil
+	}
+
+	return nil
+}
+
+func (p *parseState) addArgs(args ...string) error {
+	for len(p.positional) > 0 && len(args) > 0 {
+		arg := p.positional[0]
+
+		if err := convert(args[0], arg.value, arg.tag); err != nil {
+			p.err = err
+			return err
+		}
+
+		if !arg.isRemaining() {
+			p.positional = p.positional[1:]
+		}
+
+		args = args[1:]
+	}
+
+	p.retargs = append(p.retargs, args...)
+	return nil
+}
+
+func (p *Parser) parseNonOption(s *parseState) error {
+	if len(s.positional) > 0 {
+		return s.addArgs(s.arg)
+	}
+
+	if len(s.command.commands) > 0 && len(s.retargs) == 0 {
+		if cmd := s.lookup.commands[s.arg]; cmd != nil {
+			s.command.Active = cmd
+			cmd.fillParseState(s)
+
+			return nil
+		} else if !s.command.SubcommandsOptional {
+			s.addArgs(s.arg)
+			return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg)
+		}
+	}
+
+	if (p.Options & PassAfterNonOption) != None {
+		// If PassAfterNonOption is set then all remaining arguments
+		// are considered positional
+		if err := s.addArgs(s.arg); err != nil {
+			return err
+		}
+
+		if err := s.addArgs(s.args...); err != nil {
+			return err
+		}
+
+		s.args = []string{}
+	} else {
+		return s.addArgs(s.arg)
+	}
+
+	return nil
+}
+
+func (p *Parser) showBuiltinHelp() error {
+	var b bytes.Buffer
+
+	p.WriteHelp(&b)
+	return newError(ErrHelp, b.String())
+}
+
+func (p *Parser) printError(err error) error {
+	if err != nil && (p.Options&PrintErrors) != None {
+		flagsErr, ok := err.(*Error)
+
+		if ok && flagsErr.Type == ErrHelp {
+			fmt.Fprintln(os.Stdout, err)
+		} else {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+
+	return err
+}
+
+func (p *Parser) clearIsSet() {
+	p.eachCommand(func(c *Command) {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				option.isSet = false
+			}
+		})
+	}, true)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go
new file mode 100644
index 0000000..1ca6a85
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize.go
@@ -0,0 +1,28 @@
+// +build !windows,!plan9,!solaris,!appengine
+
+package flags
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type winsize struct {
+	row, col       uint16
+	xpixel, ypixel uint16
+}
+
+func getTerminalColumns() int {
+	ws := winsize{}
+
+	if tIOCGWINSZ != 0 {
+		syscall.Syscall(syscall.SYS_IOCTL,
+			uintptr(0),
+			uintptr(tIOCGWINSZ),
+			uintptr(unsafe.Pointer(&ws)))
+
+		return int(ws.col)
+	}
+
+	return 80
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
new file mode 100644
index 0000000..3d5385b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
@@ -0,0 +1,7 @@
+// +build windows plan9 solaris appengine
+
+package flags
+
+func getTerminalColumns() int {
+	return 80
+}
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
new file mode 100644
index 0000000..fcc1186
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_bsdish.go
@@ -0,0 +1,7 @@
+// +build darwin freebsd netbsd openbsd
+
+package flags
+
+const (
+	tIOCGWINSZ = 0x40087468
+)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
new file mode 100644
index 0000000..e3975e2
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_linux.go
@@ -0,0 +1,7 @@
+// +build linux
+
+package flags
+
+const (
+	tIOCGWINSZ = 0x5413
+)
diff --git a/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
new file mode 100644
index 0000000..3082151
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/tiocgwinsz_other.go
@@ -0,0 +1,7 @@
+// +build !darwin,!freebsd,!netbsd,!openbsd,!linux
+
+package flags
+
+const (
+	tIOCGWINSZ = 0
+)
diff --git a/vendor/github.com/jhump/protoreflect/LICENSE b/vendor/github.com/jhump/protoreflect/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/jhump/protoreflect/desc/convert.go b/vendor/github.com/jhump/protoreflect/desc/convert.go
new file mode 100644
index 0000000..6573c90
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/convert.go
@@ -0,0 +1,216 @@
+package desc
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+	intn "github.com/jhump/protoreflect/internal"
+)
+
+// CreateFileDescriptor instantiates a new file descriptor for the given descriptor proto.
+// The file's direct dependencies must be provided. If the given dependencies do not include
+// all of the file's dependencies or if the contents of the descriptors are internally
+// inconsistent (e.g. contain unresolvable symbols) then an error is returned.
+func CreateFileDescriptor(fd *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fd, deps, nil)
+}
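+
+// exampleCreateFileDescriptor is an illustrative sketch, not part of the
+// original source: it shows how CreateFileDescriptor is typically invoked,
+// with the file's direct dependencies already wrapped as descriptors and
+// passed alongside the proto. Both parameters are hypothetical
+// caller-supplied values.
+func exampleCreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps []*FileDescriptor) (*FileDescriptor, error) {
+	// Each entry in deps should correspond to one of fdp's import statements;
+	// a missing dependency causes CreateFileDescriptor to return an error.
+	return CreateFileDescriptor(fdp, deps...)
+}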
+
+func createFileDescriptor(fd *dpb.FileDescriptorProto, deps []*FileDescriptor, r *ImportResolver) (*FileDescriptor, error) {
+	ret := &FileDescriptor{
+		proto:      fd,
+		symbols:    map[string]Descriptor{},
+		fieldIndex: map[string]map[int32]*FieldDescriptor{},
+	}
+	pkg := fd.GetPackage()
+
+	// populate references to file descriptor dependencies
+	files := map[string]*FileDescriptor{}
+	for _, f := range deps {
+		files[f.proto.GetName()] = f
+	}
+	ret.deps = make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, d := range fd.GetDependency() {
+		resolved := r.ResolveImport(fd.GetName(), d)
+		ret.deps[i] = files[resolved]
+		if ret.deps[i] == nil {
+			if resolved != d {
+				ret.deps[i] = files[d]
+			}
+			if ret.deps[i] == nil {
+				return nil, intn.ErrNoSuchFile(d)
+			}
+		}
+	}
+	ret.publicDeps = make([]*FileDescriptor, len(fd.GetPublicDependency()))
+	for i, pd := range fd.GetPublicDependency() {
+		ret.publicDeps[i] = ret.deps[pd]
+	}
+	ret.weakDeps = make([]*FileDescriptor, len(fd.GetWeakDependency()))
+	for i, wd := range fd.GetWeakDependency() {
+		ret.weakDeps[i] = ret.deps[wd]
+	}
+	ret.isProto3 = fd.GetSyntax() == "proto3"
+
+	// populate all tables of child descriptors
+	for _, m := range fd.GetMessageType() {
+		md, n := createMessageDescriptor(ret, ret, pkg, m, ret.symbols)
+		ret.symbols[n] = md
+		ret.messages = append(ret.messages, md)
+	}
+	for _, e := range fd.GetEnumType() {
+		ed, n := createEnumDescriptor(ret, ret, pkg, e, ret.symbols)
+		ret.symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range fd.GetExtension() {
+		exd, n := createFieldDescriptor(ret, ret, pkg, ex)
+		ret.symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for _, s := range fd.GetService() {
+		sd, n := createServiceDescriptor(ret, pkg, s, ret.symbols)
+		ret.symbols[n] = sd
+		ret.services = append(ret.services, sd)
+	}
+
+	ret.sourceInfo = internal.CreateSourceInfoMap(fd)
+	ret.sourceInfoRecomputeFunc = ret.recomputeSourceInfo
+
+	// now we can resolve all type references and source code info
+	scopes := []scope{fileScope(ret)}
+	path := make([]int32, 1, 8)
+	path[0] = internal.File_messagesTag
+	for i, md := range ret.messages {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_enumsTag
+	for i, ed := range ret.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[0] = internal.File_extensionsTag
+	for i, exd := range ret.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+	path[0] = internal.File_servicesTag
+	for i, sd := range ret.services {
+		if err := sd.resolve(append(path, int32(i)), scopes); err != nil {
+			return nil, err
+		}
+	}
+
+	return ret, nil
+}
+
+// CreateFileDescriptors constructs a set of descriptors, one for each of the
+// given descriptor protos. The given set of descriptor protos must include all
+// transitive dependencies for every file.
+func CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, nil)
+}
+
+func createFileDescriptors(fds []*dpb.FileDescriptorProto, r *ImportResolver) (map[string]*FileDescriptor, error) {
+	if len(fds) == 0 {
+		return nil, nil
+	}
+	files := map[string]*dpb.FileDescriptorProto{}
+	resolved := map[string]*FileDescriptor{}
+	var name string
+	for _, fd := range fds {
+		name = fd.GetName()
+		files[name] = fd
+	}
+	for _, fd := range fds {
+		_, err := createFromSet(fd.GetName(), r, nil, files, resolved)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return resolved, nil
+}
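+
+// exampleCreateFileDescriptors is an illustrative sketch, not part of the
+// original source: given a slice of descriptor protos that is closed under
+// imports, it wraps them all and then looks one up by file name. Both
+// parameters are hypothetical caller-supplied values.
+func exampleCreateFileDescriptors(fdps []*dpb.FileDescriptorProto, fileName string) (*FileDescriptor, error) {
+	byName, err := CreateFileDescriptors(fdps)
+	if err != nil {
+		return nil, err
+	}
+	// The returned map is keyed by file name, as reported by GetName();
+	// the lookup yields nil if no proto with that name was supplied.
+	return byName[fileName], nil
+}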
+
+// ToFileDescriptorSet creates a FileDescriptorSet proto that contains all of the given
+// file descriptors and their transitive dependencies. The files are topologically sorted
+// so that a file will always appear after its dependencies.
+func ToFileDescriptorSet(fds ...*FileDescriptor) *dpb.FileDescriptorSet {
+	var fdps []*dpb.FileDescriptorProto
+	addAllFiles(fds, &fdps, map[string]struct{}{})
+	return &dpb.FileDescriptorSet{File: fdps}
+}
+
+func addAllFiles(src []*FileDescriptor, results *[]*dpb.FileDescriptorProto, seen map[string]struct{}) {
+	for _, fd := range src {
+		if _, ok := seen[fd.GetName()]; ok {
+			continue
+		}
+		seen[fd.GetName()] = struct{}{}
+		addAllFiles(fd.GetDependencies(), results, seen)
+		*results = append(*results, fd.AsFileDescriptorProto())
+	}
+}
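+
+// exampleRoundTrip is an illustrative sketch, not part of the original
+// source: it serializes a descriptor's transitive closure with
+// ToFileDescriptorSet and then rebuilds the same descriptor with
+// CreateFileDescriptorFromSet, relying on the topological ordering noted
+// above (the given file ends up last in the set).
+func exampleRoundTrip(fd *FileDescriptor) (*FileDescriptor, error) {
+	fdSet := ToFileDescriptorSet(fd)
+	return CreateFileDescriptorFromSet(fdSet)
+}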
+
+// CreateFileDescriptorFromSet creates a descriptor from the given file descriptor set. The
+// set's *last* file will be the returned descriptor. The set's remaining files must comprise
+// the full set of transitive dependencies of that last file. This is the same format and
+// order used by protoc when emitting a FileDescriptorSet file with an invocation like so:
+//    protoc --descriptor_set_out=./test.protoset --include_imports -I. test.proto
+func CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, nil)
+}
+
+func createFileDescriptorFromSet(fds *dpb.FileDescriptorSet, r *ImportResolver) (*FileDescriptor, error) {
+	files := fds.GetFile()
+	if len(files) == 0 {
+		return nil, errors.New("file descriptor set is empty")
+	}
+	resolved, err := createFileDescriptors(files, r)
+	if err != nil {
+		return nil, err
+	}
+	lastFilename := files[len(files)-1].GetName()
+	return resolved[lastFilename], nil
+}
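+
+// Illustrative end-to-end sketch, not part of the original source: loading a
+// protoset file produced by the protoc invocation documented above. Error
+// handling is elided and the file name is hypothetical; the snippet assumes
+// the caller imports io/ioutil and github.com/golang/protobuf/proto.
+//
+//	data, _ := ioutil.ReadFile("test.protoset")
+//	var fdSet dpb.FileDescriptorSet
+//	_ = proto.Unmarshal(data, &fdSet)
+//	fd, err := desc.CreateFileDescriptorFromSet(&fdSet)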
+
+// createFromSet creates a descriptor for the given filename. It recursively
+// creates descriptors for the given file's dependencies.
+func createFromSet(filename string, r *ImportResolver, seen []string, files map[string]*dpb.FileDescriptorProto, resolved map[string]*FileDescriptor) (*FileDescriptor, error) {
+	for _, s := range seen {
+		if filename == s {
+			return nil, fmt.Errorf("cycle in imports: %s", strings.Join(append(seen, filename), " -> "))
+		}
+	}
+	seen = append(seen, filename)
+
+	if d, ok := resolved[filename]; ok {
+		return d, nil
+	}
+	fdp := files[filename]
+	if fdp == nil {
+		return nil, intn.ErrNoSuchFile(filename)
+	}
+	deps := make([]*FileDescriptor, len(fdp.GetDependency()))
+	for i, depName := range fdp.GetDependency() {
+		resolvedDep := r.ResolveImport(filename, depName)
+		dep, err := createFromSet(resolvedDep, r, seen, files, resolved)
+		if _, ok := err.(intn.ErrNoSuchFile); ok && resolvedDep != depName {
+			dep, err = createFromSet(depName, r, seen, files, resolved)
+		}
+		if err != nil {
+			return nil, err
+		}
+		deps[i] = dep
+	}
+	d, err := createFileDescriptor(fdp, deps, r)
+	if err != nil {
+		return nil, err
+	}
+	resolved[filename] = d
+	return d, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor.go b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
new file mode 100644
index 0000000..ab235a3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor.go
@@ -0,0 +1,1666 @@
+package desc
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+// Descriptor is the common interface implemented by all descriptor objects.
+type Descriptor interface {
+	// GetName returns the name of the object described by the descriptor. This will
+	// be a base name that does not include enclosing message names or the package name.
+	// For file descriptors, this indicates the path and name to the described file.
+	GetName() string
+	// GetFullyQualifiedName returns the fully-qualified name of the object described by
+	// the descriptor. This will include the package name and any enclosing message names.
+	// For file descriptors, this returns the path and name to the described file (same as
+	// GetName).
+	GetFullyQualifiedName() string
+	// GetParent returns the enclosing element in a proto source file. If the described
+	// object is a top-level object, this returns the file descriptor. Otherwise, it returns
+	// the element in which the described object was declared. File descriptors have no
+	// parent and return nil.
+	GetParent() Descriptor
+	// GetFile returns the file descriptor in which this element was declared. File
+	// descriptors return themselves.
+	GetFile() *FileDescriptor
+	// GetOptions returns the options proto containing options for the described element.
+	GetOptions() proto.Message
+	// GetSourceInfo returns any source code information that was present in the file
+	// descriptor. Source code info is optional. If no source code info is available for
+	// the element (including if there is none at all in the file descriptor) then this
+	// returns nil.
+	GetSourceInfo() *dpb.SourceCodeInfo_Location
+	// AsProto returns the underlying descriptor proto for this descriptor.
+	AsProto() proto.Message
+}
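+
+// exampleDescribe is an illustrative sketch, not part of the original
+// source: it uses the Descriptor interface polymorphically, since every
+// concrete descriptor type in this package implements these accessors.
+func exampleDescribe(d Descriptor) string {
+	return fmt.Sprintf("%s (declared in %s)", d.GetFullyQualifiedName(), d.GetFile().GetName())
+}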
+
+type sourceInfoRecomputeFunc = internal.SourceInfoComputeFunc
+
+// FileDescriptor describes a proto source file.
+type FileDescriptor struct {
+	proto      *dpb.FileDescriptorProto
+	symbols    map[string]Descriptor
+	deps       []*FileDescriptor
+	publicDeps []*FileDescriptor
+	weakDeps   []*FileDescriptor
+	messages   []*MessageDescriptor
+	enums      []*EnumDescriptor
+	extensions []*FieldDescriptor
+	services   []*ServiceDescriptor
+	fieldIndex map[string]map[int32]*FieldDescriptor
+	isProto3   bool
+	sourceInfo internal.SourceInfoMap
+	sourceInfoRecomputeFunc
+}
+
+func (fd *FileDescriptor) recomputeSourceInfo() {
+	internal.PopulateSourceInfoMap(fd.proto, fd.sourceInfo)
+}
+
+func (fd *FileDescriptor) registerField(field *FieldDescriptor) {
+	fields := fd.fieldIndex[field.owner.GetFullyQualifiedName()]
+	if fields == nil {
+		fields = map[int32]*FieldDescriptor{}
+		fd.fieldIndex[field.owner.GetFullyQualifiedName()] = fields
+	}
+	fields[field.GetNumber()] = field
+}
+
+// GetName returns the name of the file, as it was given to the protoc invocation
+// to compile it, possibly including a path (relative to a directory in the proto
+// import path).
+func (fd *FileDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the name of the file, same as GetName. It is
+// present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFullyQualifiedName() string {
+	return fd.proto.GetName()
+}
+
+// GetPackage returns the name of the package declared in the file.
+func (fd *FileDescriptor) GetPackage() string {
+	return fd.proto.GetPackage()
+}
+
+// GetParent always returns nil: files are the root of descriptor hierarchies.
+// It is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetParent() Descriptor {
+	return nil
+}
+
+// GetFile returns the receiver, which is a file descriptor. This is present
+// to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetFile() *FileDescriptor {
+	return fd
+}
+
+// GetOptions returns the file's options. Most usages will be more interested
+// in GetFileOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFileOptions returns the file's options.
+func (fd *FileDescriptor) GetFileOptions() *dpb.FileOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns nil for files. It is present to satisfy the Descriptor
+// interface.
+func (fd *FileDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return nil
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFileDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FileDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFileDescriptorProto returns the underlying descriptor proto.
+func (fd *FileDescriptor) AsFileDescriptorProto() *dpb.FileDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FileDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// IsProto3 returns true if the file declares a syntax of "proto3".
+func (fd *FileDescriptor) IsProto3() bool {
+	return fd.isProto3
+}
+
+// GetDependencies returns all of this file's dependencies. These correspond to
+// import statements in the file.
+func (fd *FileDescriptor) GetDependencies() []*FileDescriptor {
+	return fd.deps
+}
+
+// GetPublicDependencies returns all of this file's public dependencies. These
+// correspond to public import statements in the file.
+func (fd *FileDescriptor) GetPublicDependencies() []*FileDescriptor {
+	return fd.publicDeps
+}
+
+// GetWeakDependencies returns all of this file's weak dependencies. These
+// correspond to weak import statements in the file.
+func (fd *FileDescriptor) GetWeakDependencies() []*FileDescriptor {
+	return fd.weakDeps
+}
+
+// GetMessageTypes returns all top-level messages declared in this file.
+func (fd *FileDescriptor) GetMessageTypes() []*MessageDescriptor {
+	return fd.messages
+}
+
+// GetEnumTypes returns all top-level enums declared in this file.
+func (fd *FileDescriptor) GetEnumTypes() []*EnumDescriptor {
+	return fd.enums
+}
+
+// GetExtensions returns all top-level extensions declared in this file.
+func (fd *FileDescriptor) GetExtensions() []*FieldDescriptor {
+	return fd.extensions
+}
+
+// GetServices returns all services declared in this file.
+func (fd *FileDescriptor) GetServices() []*ServiceDescriptor {
+	return fd.services
+}
+
+// FindSymbol returns the descriptor contained within this file for the
+// element with the given fully-qualified symbol name. If no such element
+// exists then this method returns nil.
+func (fd *FileDescriptor) FindSymbol(symbol string) Descriptor {
+	if symbol[0] == '.' {
+		symbol = symbol[1:]
+	}
+	return fd.symbols[symbol]
+}
+
+// FindMessage finds the message with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindMessage(msgName string) *MessageDescriptor {
+	if md, ok := fd.symbols[msgName].(*MessageDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// FindEnum finds the enum with the given fully-qualified name. If no such
+// element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindEnum(enumName string) *EnumDescriptor {
+	if ed, ok := fd.symbols[enumName].(*EnumDescriptor); ok {
+		return ed
+	} else {
+		return nil
+	}
+}
+
+// FindService finds the service with the given fully-qualified name. If no
+// such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindService(serviceName string) *ServiceDescriptor {
+	if sd, ok := fd.symbols[serviceName].(*ServiceDescriptor); ok {
+		return sd
+	} else {
+		return nil
+	}
+}
+
+// FindExtension finds the extension field for the given extended type name and
+// tag number. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtension(extendeeName string, tagNumber int32) *FieldDescriptor {
+	if exd, ok := fd.fieldIndex[extendeeName][tagNumber]; ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
+
+// FindExtensionByName finds the extension field with the given fully-qualified
+// name. If no such element exists in this file then nil is returned.
+func (fd *FileDescriptor) FindExtensionByName(extName string) *FieldDescriptor {
+	if exd, ok := fd.symbols[extName].(*FieldDescriptor); ok && exd.IsExtension() {
+		return exd
+	} else {
+		return nil
+	}
+}
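+
+// exampleLookup is an illustrative sketch, not part of the original source,
+// of the lookup helpers above: it resolves a fully-qualified message name and
+// then one of that message's fields by simple name. Both names are
+// hypothetical caller-supplied values.
+func exampleLookup(fd *FileDescriptor, msgName, fieldName string) *FieldDescriptor {
+	md := fd.FindMessage(msgName) // e.g. "mypkg.MyMessage"
+	if md == nil {
+		return nil
+	}
+	return md.FindFieldByName(fieldName)
+}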
+
+// MessageDescriptor describes a protocol buffer message.
+type MessageDescriptor struct {
+	proto          *dpb.DescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	fields         []*FieldDescriptor
+	nested         []*MessageDescriptor
+	enums          []*EnumDescriptor
+	extensions     []*FieldDescriptor
+	oneOfs         []*OneOfDescriptor
+	extRanges      extRanges
+	fqn            string
+	sourceInfoPath []int32
+	jsonNames      jsonNameMap
+	isProto3       bool
+	isMapEntry     bool
+}
+
+func createMessageDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, md *dpb.DescriptorProto, symbols map[string]Descriptor) (*MessageDescriptor, string) {
+	msgName := merge(enclosing, md.GetName())
+	ret := &MessageDescriptor{proto: md, parent: parent, file: fd, fqn: msgName}
+	for _, f := range md.GetField() {
+		fld, n := createFieldDescriptor(fd, ret, msgName, f)
+		symbols[n] = fld
+		ret.fields = append(ret.fields, fld)
+	}
+	for _, nm := range md.NestedType {
+		nmd, n := createMessageDescriptor(fd, ret, msgName, nm, symbols)
+		symbols[n] = nmd
+		ret.nested = append(ret.nested, nmd)
+	}
+	for _, e := range md.EnumType {
+		ed, n := createEnumDescriptor(fd, ret, msgName, e, symbols)
+		symbols[n] = ed
+		ret.enums = append(ret.enums, ed)
+	}
+	for _, ex := range md.GetExtension() {
+		exd, n := createFieldDescriptor(fd, ret, msgName, ex)
+		symbols[n] = exd
+		ret.extensions = append(ret.extensions, exd)
+	}
+	for i, o := range md.GetOneofDecl() {
+		od, n := createOneOfDescriptor(fd, ret, i, msgName, o)
+		symbols[n] = od
+		ret.oneOfs = append(ret.oneOfs, od)
+	}
+	for _, r := range md.GetExtensionRange() {
+		// proto.ExtensionRange is inclusive (and that's how extension ranges are defined in code),
+		// but protoc converts the range to an exclusive end in the descriptor, so we must convert it back.
+		end := r.GetEnd() - 1
+		ret.extRanges = append(ret.extRanges, proto.ExtensionRange{
+			Start: r.GetStart(),
+			End:   end})
+	}
+	sort.Sort(ret.extRanges)
+	ret.isProto3 = fd.isProto3
+	ret.isMapEntry = md.GetOptions().GetMapEntry() &&
+		len(ret.fields) == 2 &&
+		ret.fields[0].GetNumber() == 1 &&
+		ret.fields[1].GetNumber() == 2
+
+	return ret, msgName
+}
+
+func (md *MessageDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Message_nestedMessagesTag)
+	scopes = append(scopes, messageScope(md))
+	for i, nmd := range md.nested {
+		if err := nmd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_enumsTag
+	for i, ed := range md.enums {
+		ed.resolve(append(path, int32(i)))
+	}
+	path[len(path)-1] = internal.Message_fieldsTag
+	for i, fld := range md.fields {
+		if err := fld.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_extensionsTag
+	for i, exd := range md.extensions {
+		if err := exd.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	path[len(path)-1] = internal.Message_oneOfsTag
+	for i, od := range md.oneOfs {
+		od.resolve(append(path, int32(i)))
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the message.
+func (md *MessageDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the message. This
+// includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (md *MessageDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the message's enclosing descriptor. For top-level messages,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (md *MessageDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this message is defined.
+func (md *MessageDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the message's options. Most usages will be more interested
+// in GetMessageOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMessageOptions returns the message's options.
+func (md *MessageDescriptor) GetMessageOptions() *dpb.MessageOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the message, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// message was defined and also contains comments associated with the message
+// definition.
+func (md *MessageDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MessageDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsDescriptorProto returns the underlying descriptor proto.
+func (md *MessageDescriptor) AsDescriptorProto() *dpb.DescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MessageDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsMapEntry returns true if this is a synthetic message type that represents an entry
+// in a map field.
+func (md *MessageDescriptor) IsMapEntry() bool {
+	return md.isMapEntry
+}
+
+// GetFields returns all of the fields for this message.
+func (md *MessageDescriptor) GetFields() []*FieldDescriptor {
+	return md.fields
+}
+
+// GetNestedMessageTypes returns all of the message types declared inside this message.
+func (md *MessageDescriptor) GetNestedMessageTypes() []*MessageDescriptor {
+	return md.nested
+}
+
+// GetNestedEnumTypes returns all of the enums declared inside this message.
+func (md *MessageDescriptor) GetNestedEnumTypes() []*EnumDescriptor {
+	return md.enums
+}
+
+// GetNestedExtensions returns all of the extensions declared inside this message.
+func (md *MessageDescriptor) GetNestedExtensions() []*FieldDescriptor {
+	return md.extensions
+}
+
+// GetOneOfs returns all of the one-of field sets declared inside this message.
+func (md *MessageDescriptor) GetOneOfs() []*OneOfDescriptor {
+	return md.oneOfs
+}
+
+// IsProto3 returns true if the file in which this message is defined declares a syntax of "proto3".
+func (md *MessageDescriptor) IsProto3() bool {
+	return md.isProto3
+}
+
+// GetExtensionRanges returns the ranges of extension field numbers for this message.
+func (md *MessageDescriptor) GetExtensionRanges() []proto.ExtensionRange {
+	return md.extRanges
+}
+
+// IsExtendable returns true if this message has any extension ranges.
+func (md *MessageDescriptor) IsExtendable() bool {
+	return len(md.extRanges) > 0
+}
+
+// IsExtension returns true if the given tag number is within any of this message's
+// extension ranges.
+func (md *MessageDescriptor) IsExtension(tagNumber int32) bool {
+	return md.extRanges.IsExtension(tagNumber)
+}
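+
+// exampleHasExtensionTag is an illustrative sketch, not part of the original
+// source: it combines the two helpers above to check whether a hypothetical
+// tag number falls within one of this message's extension ranges.
+func exampleHasExtensionTag(md *MessageDescriptor, tag int32) bool {
+	return md.IsExtendable() && md.IsExtension(tag)
+}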
+
+type extRanges []proto.ExtensionRange
+
+func (er extRanges) String() string {
+	var buf bytes.Buffer
+	first := true
+	for _, r := range er {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(",")
+		}
+		fmt.Fprintf(&buf, "%d..%d", r.Start, r.End)
+	}
+	return buf.String()
+}
+
+func (er extRanges) IsExtension(tagNumber int32) bool {
+	i := sort.Search(len(er), func(i int) bool { return er[i].End >= tagNumber })
+	return i < len(er) && tagNumber >= er[i].Start
+}
+
+func (er extRanges) Len() int {
+	return len(er)
+}
+
+func (er extRanges) Less(i, j int) bool {
+	return er[i].Start < er[j].Start
+}
+
+func (er extRanges) Swap(i, j int) {
+	er[i], er[j] = er[j], er[i]
+}
+
+// FindFieldByName finds the field with the given name. If no such field exists
+// then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByName(fieldName string) *FieldDescriptor {
+	fqn := fmt.Sprintf("%s.%s", md.fqn, fieldName)
+	if fd, ok := md.file.symbols[fqn].(*FieldDescriptor); ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
+
+// FindFieldByNumber finds the field with the given tag number. If no such field
+// exists then nil is returned. Only regular fields are returned, not extensions.
+func (md *MessageDescriptor) FindFieldByNumber(tagNumber int32) *FieldDescriptor {
+	if fd, ok := md.file.fieldIndex[md.fqn][tagNumber]; ok && !fd.IsExtension() {
+		return fd
+	} else {
+		return nil
+	}
+}
+
+// FieldDescriptor describes a field of a protocol buffer message.
+type FieldDescriptor struct {
+	proto          *dpb.FieldDescriptorProto
+	parent         Descriptor
+	owner          *MessageDescriptor
+	file           *FileDescriptor
+	oneOf          *OneOfDescriptor
+	msgType        *MessageDescriptor
+	enumType       *EnumDescriptor
+	fqn            string
+	sourceInfoPath []int32
+	def            memoizedDefault
+	isMap          bool
+}
+
+func createFieldDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, fld *dpb.FieldDescriptorProto) (*FieldDescriptor, string) {
+	fldName := merge(enclosing, fld.GetName())
+	ret := &FieldDescriptor{proto: fld, parent: parent, file: fd, fqn: fldName}
+	if fld.GetExtendee() == "" {
+		ret.owner = parent.(*MessageDescriptor)
+	}
+	// owner for extensions, field type (be it message or enum), and one-ofs get resolved later
+	return ret, fldName
+}
+
+func (fd *FieldDescriptor) resolve(path []int32, scopes []scope) error {
+	if fd.proto.OneofIndex != nil && fd.oneOf == nil {
+		return fmt.Errorf("could not link field %s to one-of index %d", fd.fqn, *fd.proto.OneofIndex)
+	}
+	fd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_ENUM {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.enumType = desc.(*EnumDescriptor)
+		}
+	}
+	if fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE || fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+		if desc, err := resolve(fd.file, fd.proto.GetTypeName(), scopes); err != nil {
+			return err
+		} else {
+			fd.msgType = desc.(*MessageDescriptor)
+		}
+	}
+	if fd.proto.GetExtendee() != "" {
+		if desc, err := resolve(fd.file, fd.proto.GetExtendee(), scopes); err != nil {
+			return err
+		} else {
+			fd.owner = desc.(*MessageDescriptor)
+		}
+	}
+	fd.file.registerField(fd)
+	fd.isMap = fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED &&
+		fd.proto.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().IsMapEntry()
+	return nil
+}
+
+func (fd *FieldDescriptor) determineDefault() interface{} {
+	if fd.IsMap() {
+		return map[interface{}]interface{}(nil)
+	} else if fd.IsRepeated() {
+		return []interface{}(nil)
+	} else if fd.msgType != nil {
+		return nil
+	}
+
+	proto3 := fd.file.isProto3
+	if !proto3 {
+		def := fd.AsFieldDescriptorProto().GetDefaultValue()
+		if def != "" {
+			ret := parseDefaultValue(fd, def)
+			if ret != nil {
+				return ret
+			}
+			// if we can't parse default value, fall-through to return normal default...
+		}
+	}
+
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_FIXED32,
+		dpb.FieldDescriptorProto_TYPE_UINT32:
+		return uint32(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED32,
+		dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32:
+		return int32(0)
+	case dpb.FieldDescriptorProto_TYPE_FIXED64,
+		dpb.FieldDescriptorProto_TYPE_UINT64:
+		return uint64(0)
+	case dpb.FieldDescriptorProto_TYPE_SFIXED64,
+		dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64:
+		return int64(0)
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		return float32(0.0)
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		return float64(0.0)
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		return false
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(nil)
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return ""
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		if proto3 {
+			return int32(0)
+		}
+		enumVals := fd.GetEnumType().GetValues()
+		if len(enumVals) > 0 {
+			return enumVals[0].GetNumber()
+		} else {
+			return int32(0) // enum declares no values; fall back to zero
+		}
+	default:
+		panic(fmt.Sprintf("Unknown field type: %v", fd.GetType()))
+	}
+}
+
+func parseDefaultValue(fd *FieldDescriptor, val string) interface{} {
+	switch fd.GetType() {
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		vd := fd.GetEnumType().FindValueByName(val)
+		if vd != nil {
+			return vd.GetNumber()
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		if val == "true" {
+			return true
+		} else if val == "false" {
+			return false
+		}
+		return nil
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		return []byte(unescape(val))
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		return val
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := strconv.ParseFloat(val, 32); err == nil {
+			return float32(f)
+		} else {
+			return float32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		if f, err := strconv.ParseFloat(val, 64); err == nil {
+			return f
+		} else {
+			return float64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT32,
+		dpb.FieldDescriptorProto_TYPE_SINT32,
+		dpb.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
+			return int32(i)
+		} else {
+			return int32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT32,
+		dpb.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
+			return uint32(i)
+		} else {
+			return uint32(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_INT64,
+		dpb.FieldDescriptorProto_TYPE_SINT64,
+		dpb.FieldDescriptorProto_TYPE_SFIXED64:
+		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
+			return i
+		} else {
+			return int64(0)
+		}
+	case dpb.FieldDescriptorProto_TYPE_UINT64,
+		dpb.FieldDescriptorProto_TYPE_FIXED64:
+		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
+			return i
+		} else {
+			return uint64(0)
+		}
+	default:
+		return nil
+	}
+}
+
+func unescape(s string) string {
+	// protoc encodes default values for 'bytes' fields using C escaping,
+	// so this function reverses that escaping
+	out := make([]byte, 0, len(s))
+	var buf [4]byte
+	for len(s) > 0 {
+		if s[0] != '\\' || len(s) < 2 {
+			// not escape sequence, or too short to be well-formed escape
+			out = append(out, s[0])
+			s = s[1:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			n := matchPrefix(s[2:], 2, isHex)
+			if n == 0 {
+				// bad escape
+				out = append(out, s[:2]...)
+				s = s[2:]
+			} else {
+				c, err := strconv.ParseUint(s[2:2+n], 16, 8)
+				if err != nil {
+					// shouldn't really happen...
+					out = append(out, s[:2+n]...)
+				} else {
+					out = append(out, byte(c))
+				}
+				s = s[2+n:]
+			}
+		} else if s[1] >= '0' && s[1] <= '7' {
+			n := 1 + matchPrefix(s[2:], 2, isOctal)
+			c, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil || c > 0xff {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(c))
+			}
+			s = s[1+n:]
+		} else if s[1] == 'u' {
+			if len(s) < 6 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:6], 16, 16)
+				if err != nil {
+					// bad escape
+					out = append(out, s[:6]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[6:]
+			}
+		} else if s[1] == 'U' {
+			if len(s) < 10 {
+				// bad escape
+				out = append(out, s...)
+				s = s[len(s):]
+			} else {
+				c, err := strconv.ParseUint(s[2:10], 16, 32)
+				if err != nil || c > 0x10ffff {
+					// bad escape
+					out = append(out, s[:10]...)
+				} else {
+					w := utf8.EncodeRune(buf[:], rune(c))
+					out = append(out, buf[:w]...)
+				}
+				s = s[10:]
+			}
+		} else {
+			switch s[1] {
+			case 'a':
+				out = append(out, '\a')
+			case 'b':
+				out = append(out, '\b')
+			case 'f':
+				out = append(out, '\f')
+			case 'n':
+				out = append(out, '\n')
+			case 'r':
+				out = append(out, '\r')
+			case 't':
+				out = append(out, '\t')
+			case 'v':
+				out = append(out, '\v')
+			case '\\':
+				out = append(out, '\\')
+			case '\'':
+				out = append(out, '\'')
+			case '"':
+				out = append(out, '"')
+			case '?':
+				out = append(out, '?')
+			default:
+				// invalid escape, just copy it as-is
+				out = append(out, s[:2]...)
+			}
+			s = s[2:]
+		}
+	}
+	return string(out)
+}
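+
+// Illustrative note, not part of the original source: unescape reverses the
+// C-style escaping that protoc applies to 'bytes' defaults, so, for example,
+//
+//	unescape(`\x41\102\n`) == "AB\n"
+//
+// where \x41 is hex for 'A', \102 is octal for 'B', and \n is a newline.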
+
+func isOctal(b byte) bool { return b >= '0' && b <= '7' }
+func isHex(b byte) bool {
+	return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
+}
+func matchPrefix(s string, limit int, fn func(byte) bool) int {
+	l := len(s)
+	if l > limit {
+		l = limit
+	}
+	i := 0
+	for ; i < l; i++ {
+		if !fn(s[i]) {
+			return i
+		}
+	}
+	return i
+}
+
+// GetName returns the name of the field.
+func (fd *FieldDescriptor) GetName() string {
+	return fd.proto.GetName()
+}
+
+// GetNumber returns the tag number of this field.
+func (fd *FieldDescriptor) GetNumber() int32 {
+	return fd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the field. Unlike
+// GetName, this includes the fully qualified name of the enclosing message for
+// regular fields.
+//
+// For extension fields, this includes the package (if there is one) as well as
+// any enclosing messages. The package and/or enclosing messages are for where
+// the extension is defined, not the message it extends.
+//
+// If this field is part of a one-of, the fully qualified name does *not*
+// include the name of the one-of, only of the enclosing message.
+func (fd *FieldDescriptor) GetFullyQualifiedName() string {
+	return fd.fqn
+}
+
+// GetParent returns the field's enclosing descriptor. For normal
+// (non-extension) fields, this is the enclosing message. For extensions, this
+// is the descriptor in which the extension is defined, not the message that is
+// extended. The parent for an extension may be a file descriptor or a message,
+// depending on where the extension is defined.
+func (fd *FieldDescriptor) GetParent() Descriptor {
+	return fd.parent
+}
+
+// GetFile returns the descriptor for the file in which this field is defined.
+func (fd *FieldDescriptor) GetFile() *FileDescriptor {
+	return fd.file
+}
+
+// GetOptions returns the field's options. Most usages will be more interested
+// in GetFieldOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) GetOptions() proto.Message {
+	return fd.proto.GetOptions()
+}
+
+// GetFieldOptions returns the field's options.
+func (fd *FieldDescriptor) GetFieldOptions() *dpb.FieldOptions {
+	return fd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the field, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// field was defined and also contains comments associated with the field
+// definition.
+func (fd *FieldDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return fd.file.sourceInfo.Get(fd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsFieldDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (fd *FieldDescriptor) AsProto() proto.Message {
+	return fd.proto
+}
+
+// AsFieldDescriptorProto returns the underlying descriptor proto.
+func (fd *FieldDescriptor) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return fd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (fd *FieldDescriptor) String() string {
+	return fd.proto.String()
+}
+
+// GetJSONName returns the name of the field as referenced in the message's JSON
+// format.
+func (fd *FieldDescriptor) GetJSONName() string {
+	if jsonName := fd.proto.GetJsonName(); jsonName != "" {
+		return jsonName
+	}
+	return fd.proto.GetName()
+}
+
+// GetFullyQualifiedJSONName returns the JSON format name (same as GetJSONName),
+// but includes the fully qualified name of the enclosing message.
+//
+// If the field is an extension, it will return the package name (if there is
+// one) as well as the names of any enclosing messages. The package and/or
+// enclosing messages are for where the extension is defined, not the message it
+// extends.
+func (fd *FieldDescriptor) GetFullyQualifiedJSONName() string {
+	parent := fd.GetParent()
+	switch parent := parent.(type) {
+	case *FileDescriptor:
+		pkg := parent.GetPackage()
+		if pkg == "" {
+			return fd.GetJSONName()
+		}
+		return fmt.Sprintf("%s.%s", pkg, fd.GetJSONName())
+	default:
+		return fmt.Sprintf("%s.%s", parent.GetFullyQualifiedName(), fd.GetJSONName())
+	}
+}
+
+// GetOwner returns the message type that this field belongs to. If this is a normal
+// field then this is the same as GetParent. But for extensions, this will be the
+// extendee message whereas GetParent refers to where the extension was declared.
+func (fd *FieldDescriptor) GetOwner() *MessageDescriptor {
+	return fd.owner
+}
+
+// IsExtension returns true if this is an extension field.
+func (fd *FieldDescriptor) IsExtension() bool {
+	return fd.proto.GetExtendee() != ""
+}
+
+// GetOneOf returns the one-of field set to which this field belongs. If this field
+// is not part of a one-of then this method returns nil.
+func (fd *FieldDescriptor) GetOneOf() *OneOfDescriptor {
+	return fd.oneOf
+}
+
+// GetType returns the type of this field. If the type indicates an enum, the
+// enum type can be queried via GetEnumType. If the type indicates a message, the
+// message type can be queried via GetMessageType.
+func (fd *FieldDescriptor) GetType() dpb.FieldDescriptorProto_Type {
+	return fd.proto.GetType()
+}
+
+// GetLabel returns the label for this field. The label can be required (proto2-only),
+// optional (default for proto3), or repeated.
+func (fd *FieldDescriptor) GetLabel() dpb.FieldDescriptorProto_Label {
+	return fd.proto.GetLabel()
+}
+
+// IsRequired returns true if this field has the "required" label.
+func (fd *FieldDescriptor) IsRequired() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// IsRepeated returns true if this field has the "repeated" label.
+func (fd *FieldDescriptor) IsRepeated() bool {
+	return fd.proto.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// IsMap returns true if this is a map field. If so, it will have the "repeated"
+// label and its type will be a message that represents a map entry. The map entry
+// message will have exactly two fields: tag #1 is the key and tag #2 is the value.
+func (fd *FieldDescriptor) IsMap() bool {
+	return fd.isMap
+}
+
+// GetMapKeyType returns the type of the key field if this is a map field. If it is
+// not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapKeyType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(1))
+	}
+	return nil
+}
+
+// GetMapValueType returns the type of the value field if this is a map field. If it
+// is not a map field, nil is returned.
+func (fd *FieldDescriptor) GetMapValueType() *FieldDescriptor {
+	if fd.isMap {
+		return fd.msgType.FindFieldByNumber(int32(2))
+	}
+	return nil
+}
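+
+// exampleMapFieldTypes is an illustrative sketch, not part of the original
+// source: for a map field, the key and value descriptors come from the
+// synthetic map-entry message described above (tag 1 is the key, tag 2 the
+// value); for any other field both results are nil.
+func exampleMapFieldTypes(fd *FieldDescriptor) (key, val *FieldDescriptor) {
+	if !fd.IsMap() {
+		return nil, nil
+	}
+	return fd.GetMapKeyType(), fd.GetMapValueType()
+}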
+
+// GetMessageType returns the type of this field if it is a message type. If
+// this field is not a message type, it returns nil.
+func (fd *FieldDescriptor) GetMessageType() *MessageDescriptor {
+	return fd.msgType
+}
+
+// GetEnumType returns the type of this field if it is an enum type. If this
+// field is not an enum type, it returns nil.
+func (fd *FieldDescriptor) GetEnumType() *EnumDescriptor {
+	return fd.enumType
+}
+
+// GetDefaultValue returns the default value for this field.
+//
+// If this field represents a message type, this method always returns nil (even though
+// for proto2 files, the default value should be a default instance of the message type).
+// If the field represents an enum type, this method returns an int32 corresponding to the
+// enum value. If this field is a map, it returns a nil map[interface{}]interface{}. If
+// this field is repeated (and not a map), it returns a nil []interface{}.
+//
+// Otherwise, it returns the declared default value for the field or a zero value, if no
+// default is declared or if the file is proto3. The type of said return value corresponds
+// to the type of the field:
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+func (fd *FieldDescriptor) GetDefaultValue() interface{} {
+	return fd.getDefaultValue()
+}
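+
+// exampleDefaultValue is an illustrative sketch, not part of the original
+// source: it consumes GetDefaultValue with a type switch that mirrors the
+// table above (only a few of the possible Go types are shown).
+func exampleDefaultValue(fd *FieldDescriptor) string {
+	switch v := fd.GetDefaultValue().(type) {
+	case int32:
+		return fmt.Sprintf("int32 default %d", v)
+	case string:
+		return fmt.Sprintf("string default %q", v)
+	case bool:
+		return fmt.Sprintf("bool default %t", v)
+	default:
+		return fmt.Sprintf("default %v", v)
+	}
+}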
+
+// EnumDescriptor describes an enum declared in a proto file.
+type EnumDescriptor struct {
+	proto          *dpb.EnumDescriptorProto
+	parent         Descriptor
+	file           *FileDescriptor
+	values         []*EnumValueDescriptor
+	valuesByNum    sortedValues
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumDescriptor(fd *FileDescriptor, parent Descriptor, enclosing string, ed *dpb.EnumDescriptorProto, symbols map[string]Descriptor) (*EnumDescriptor, string) {
+	enumName := merge(enclosing, ed.GetName())
+	ret := &EnumDescriptor{proto: ed, parent: parent, file: fd, fqn: enumName}
+	for _, ev := range ed.GetValue() {
+		evd, n := createEnumValueDescriptor(fd, ret, enumName, ev)
+		symbols[n] = evd
+		ret.values = append(ret.values, evd)
+	}
+	if len(ret.values) > 0 {
+		ret.valuesByNum = make(sortedValues, len(ret.values))
+		copy(ret.valuesByNum, ret.values)
+		sort.Stable(ret.valuesByNum)
+	}
+	return ret, enumName
+}
+
+type sortedValues []*EnumValueDescriptor
+
+func (sv sortedValues) Len() int {
+	return len(sv)
+}
+
+func (sv sortedValues) Less(i, j int) bool {
+	return sv[i].GetNumber() < sv[j].GetNumber()
+}
+
+func (sv sortedValues) Swap(i, j int) {
+	sv[i], sv[j] = sv[j], sv[i]
+}
+
+func (ed *EnumDescriptor) resolve(path []int32) {
+	ed.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Enum_valuesTag)
+	for i, evd := range ed.values {
+		evd.resolve(append(path, int32(i)))
+	}
+}
+
+// GetName returns the simple (unqualified) name of the enum type.
+func (ed *EnumDescriptor) GetName() string {
+	return ed.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum type.
+// This includes the package name (if there is one) as well as the names of any
+// enclosing messages.
+func (ed *EnumDescriptor) GetFullyQualifiedName() string {
+	return ed.fqn
+}
+
+// GetParent returns the enum type's enclosing descriptor. For top-level enums,
+// this will be a file descriptor. Otherwise it will be the descriptor for the
+// enclosing message.
+func (ed *EnumDescriptor) GetParent() Descriptor {
+	return ed.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum is defined.
+func (ed *EnumDescriptor) GetFile() *FileDescriptor {
+	return ed.file
+}
+
+// GetOptions returns the enum type's options. Most usages will be more
+// interested in GetEnumOptions, which has a concrete return type. This generic
+// version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) GetOptions() proto.Message {
+	return ed.proto.GetOptions()
+}
+
+// GetEnumOptions returns the enum type's options.
+func (ed *EnumDescriptor) GetEnumOptions() *dpb.EnumOptions {
+	return ed.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum type, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum type was defined and also contains comments associated with the enum
+// definition.
+func (ed *EnumDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return ed.file.sourceInfo.Get(ed.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (ed *EnumDescriptor) AsProto() proto.Message {
+	return ed.proto
+}
+
+// AsEnumDescriptorProto returns the underlying descriptor proto.
+func (ed *EnumDescriptor) AsEnumDescriptorProto() *dpb.EnumDescriptorProto {
+	return ed.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (ed *EnumDescriptor) String() string {
+	return ed.proto.String()
+}
+
+// GetValues returns all of the allowed values defined for this enum.
+func (ed *EnumDescriptor) GetValues() []*EnumValueDescriptor {
+	return ed.values
+}
+
+// FindValueByName finds the enum value with the given name. If no such value exists
+// then nil is returned.
+func (ed *EnumDescriptor) FindValueByName(name string) *EnumValueDescriptor {
+	fqn := fmt.Sprintf("%s.%s", ed.fqn, name)
+	if vd, ok := ed.file.symbols[fqn].(*EnumValueDescriptor); ok {
+		return vd
+	} else {
+		return nil
+	}
+}
+
+// FindValueByNumber finds the value with the given numeric value. If no such value
+// exists then nil is returned. If aliases are allowed and multiple values have the
+// given number, the first declared value is returned.
+func (ed *EnumDescriptor) FindValueByNumber(num int32) *EnumValueDescriptor {
+	index := sort.Search(len(ed.valuesByNum), func(i int) bool { return ed.valuesByNum[i].GetNumber() >= num })
+	if index < len(ed.valuesByNum) {
+		vd := ed.valuesByNum[index]
+		if vd.GetNumber() == num {
+			return vd
+		}
+	}
+	return nil
+}
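+
+// exampleEnumLookup is an illustrative sketch, not part of the original
+// source: it combines the two enum lookups above, mapping a hypothetical
+// value name to its number and back. If the enum allows aliases, the value
+// returned by number may be a different (earlier-declared) alias.
+func exampleEnumLookup(ed *EnumDescriptor, name string) *EnumValueDescriptor {
+	vd := ed.FindValueByName(name)
+	if vd == nil {
+		return nil
+	}
+	return ed.FindValueByNumber(vd.GetNumber())
+}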
+
+// EnumValueDescriptor describes an allowed value of an enum declared in a proto file.
+type EnumValueDescriptor struct {
+	proto          *dpb.EnumValueDescriptorProto
+	parent         *EnumDescriptor
+	file           *FileDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createEnumValueDescriptor(fd *FileDescriptor, parent *EnumDescriptor, enclosing string, evd *dpb.EnumValueDescriptorProto) (*EnumValueDescriptor, string) {
+	valName := merge(enclosing, evd.GetName())
+	return &EnumValueDescriptor{proto: evd, parent: parent, file: fd, fqn: valName}, valName
+}
+
+func (vd *EnumValueDescriptor) resolve(path []int32) {
+	vd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the enum value.
+func (vd *EnumValueDescriptor) GetName() string {
+	return vd.proto.GetName()
+}
+
+// GetNumber returns the numeric value associated with this enum value.
+func (vd *EnumValueDescriptor) GetNumber() int32 {
+	return vd.proto.GetNumber()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the enum value.
+// Unlike GetName, this includes the fully qualified name of the enclosing enum.
+func (vd *EnumValueDescriptor) GetFullyQualifiedName() string {
+	return vd.fqn
+}
+
+// GetParent returns the descriptor for the enum in which this enum value is
+// defined. Most usages will prefer to use GetEnum, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetParent() Descriptor {
+	return vd.parent
+}
+
+// GetEnum returns the enum in which this enum value is defined.
+func (vd *EnumValueDescriptor) GetEnum() *EnumDescriptor {
+	return vd.parent
+}
+
+// GetFile returns the descriptor for the file in which this enum value is
+// defined.
+func (vd *EnumValueDescriptor) GetFile() *FileDescriptor {
+	return vd.file
+}
+
+// GetOptions returns the enum value's options. Most usages will be more
+// interested in GetEnumValueOptions, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) GetOptions() proto.Message {
+	return vd.proto.GetOptions()
+}
+
+// GetEnumValueOptions returns the enum value's options.
+func (vd *EnumValueDescriptor) GetEnumValueOptions() *dpb.EnumValueOptions {
+	return vd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the enum value, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// enum value was defined and also contains comments associated with the enum
+// value definition.
+func (vd *EnumValueDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return vd.file.sourceInfo.Get(vd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsEnumValueDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (vd *EnumValueDescriptor) AsProto() proto.Message {
+	return vd.proto
+}
+
+// AsEnumValueDescriptorProto returns the underlying descriptor proto.
+func (vd *EnumValueDescriptor) AsEnumValueDescriptorProto() *dpb.EnumValueDescriptorProto {
+	return vd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (vd *EnumValueDescriptor) String() string {
+	return vd.proto.String()
+}
+
+// ServiceDescriptor describes an RPC service declared in a proto file.
+type ServiceDescriptor struct {
+	proto          *dpb.ServiceDescriptorProto
+	file           *FileDescriptor
+	methods        []*MethodDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createServiceDescriptor(fd *FileDescriptor, enclosing string, sd *dpb.ServiceDescriptorProto, symbols map[string]Descriptor) (*ServiceDescriptor, string) {
+	serviceName := merge(enclosing, sd.GetName())
+	ret := &ServiceDescriptor{proto: sd, file: fd, fqn: serviceName}
+	for _, m := range sd.GetMethod() {
+		md, n := createMethodDescriptor(fd, ret, serviceName, m)
+		symbols[n] = md
+		ret.methods = append(ret.methods, md)
+	}
+	return ret, serviceName
+}
+
+func (sd *ServiceDescriptor) resolve(path []int32, scopes []scope) error {
+	sd.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	path = append(path, internal.Service_methodsTag)
+	for i, md := range sd.methods {
+		if err := md.resolve(append(path, int32(i)), scopes); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetName returns the simple (unqualified) name of the service.
+func (sd *ServiceDescriptor) GetName() string {
+	return sd.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the service. This
+// includes the package name (if there is one).
+func (sd *ServiceDescriptor) GetFullyQualifiedName() string {
+	return sd.fqn
+}
+
+// GetParent returns the descriptor for the file in which this service is
+// defined. Most usages will prefer to use GetFile, which has a concrete return
+// type. This more generic method is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetParent() Descriptor {
+	return sd.file
+}
+
+// GetFile returns the descriptor for the file in which this service is defined.
+func (sd *ServiceDescriptor) GetFile() *FileDescriptor {
+	return sd.file
+}
+
+// GetOptions returns the service's options. Most usages will be more interested
+// in GetServiceOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) GetOptions() proto.Message {
+	return sd.proto.GetOptions()
+}
+
+// GetServiceOptions returns the service's options.
+func (sd *ServiceDescriptor) GetServiceOptions() *dpb.ServiceOptions {
+	return sd.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the service, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// service was defined and also contains comments associated with the service
+// definition.
+func (sd *ServiceDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return sd.file.sourceInfo.Get(sd.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsServiceDescriptorProto, which has a concrete return type.
+// This generic version is present to satisfy the Descriptor interface.
+func (sd *ServiceDescriptor) AsProto() proto.Message {
+	return sd.proto
+}
+
+// AsServiceDescriptorProto returns the underlying descriptor proto.
+func (sd *ServiceDescriptor) AsServiceDescriptorProto() *dpb.ServiceDescriptorProto {
+	return sd.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (sd *ServiceDescriptor) String() string {
+	return sd.proto.String()
+}
+
+// GetMethods returns all of the RPC methods for this service.
+func (sd *ServiceDescriptor) GetMethods() []*MethodDescriptor {
+	return sd.methods
+}
+
+// FindMethodByName finds the method with the given name. If no such method exists
+// then nil is returned.
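+// For example, given a descriptor sd for a hypothetical service
+// "acme.UserService", sd.FindMethodByName("GetUser") looks up the fully
+// qualified symbol "acme.UserService.GetUser" in the file's symbol table and
+// returns its method descriptor, or nil if the service has no such method.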
+func (sd *ServiceDescriptor) FindMethodByName(name string) *MethodDescriptor {
+	fqn := fmt.Sprintf("%s.%s", sd.fqn, name)
+	if md, ok := sd.file.symbols[fqn].(*MethodDescriptor); ok {
+		return md
+	} else {
+		return nil
+	}
+}
+
+// MethodDescriptor describes an RPC method declared in a proto file.
+type MethodDescriptor struct {
+	proto          *dpb.MethodDescriptorProto
+	parent         *ServiceDescriptor
+	file           *FileDescriptor
+	inType         *MessageDescriptor
+	outType        *MessageDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createMethodDescriptor(fd *FileDescriptor, parent *ServiceDescriptor, enclosing string, md *dpb.MethodDescriptorProto) (*MethodDescriptor, string) {
+	// request and response types get resolved later
+	methodName := merge(enclosing, md.GetName())
+	return &MethodDescriptor{proto: md, parent: parent, file: fd, fqn: methodName}, methodName
+}
+
+func (md *MethodDescriptor) resolve(path []int32, scopes []scope) error {
+	md.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+	if desc, err := resolve(md.file, md.proto.GetInputType(), scopes); err != nil {
+		return err
+	} else {
+		md.inType = desc.(*MessageDescriptor)
+	}
+	if desc, err := resolve(md.file, md.proto.GetOutputType(), scopes); err != nil {
+		return err
+	} else {
+		md.outType = desc.(*MessageDescriptor)
+	}
+	return nil
+}
+
+// GetName returns the name of the method.
+func (md *MethodDescriptor) GetName() string {
+	return md.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the method. Unlike
+// GetName, this includes the fully qualified name of the enclosing service.
+func (md *MethodDescriptor) GetFullyQualifiedName() string {
+	return md.fqn
+}
+
+// GetParent returns the descriptor for the service in which this method is
+// defined. Most usages will prefer to use GetService, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (md *MethodDescriptor) GetParent() Descriptor {
+	return md.parent
+}
+
+// GetService returns the RPC service in which this method is declared.
+func (md *MethodDescriptor) GetService() *ServiceDescriptor {
+	return md.parent
+}
+
+// GetFile returns the descriptor for the file in which this method is defined.
+func (md *MethodDescriptor) GetFile() *FileDescriptor {
+	return md.file
+}
+
+// GetOptions returns the method's options. Most usages will be more interested
+// in GetMethodOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) GetOptions() proto.Message {
+	return md.proto.GetOptions()
+}
+
+// GetMethodOptions returns the method's options.
+func (md *MethodDescriptor) GetMethodOptions() *dpb.MethodOptions {
+	return md.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the method, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// method was defined and also contains comments associated with the method
+// definition.
+func (md *MethodDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return md.file.sourceInfo.Get(md.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsMethodDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (md *MethodDescriptor) AsProto() proto.Message {
+	return md.proto
+}
+
+// AsMethodDescriptorProto returns the underlying descriptor proto.
+func (md *MethodDescriptor) AsMethodDescriptorProto() *dpb.MethodDescriptorProto {
+	return md.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (md *MethodDescriptor) String() string {
+	return md.proto.String()
+}
+
+// IsServerStreaming returns true if this is a server-streaming method.
+func (md *MethodDescriptor) IsServerStreaming() bool {
+	return md.proto.GetServerStreaming()
+}
+
+// IsClientStreaming returns true if this is a client-streaming method.
+func (md *MethodDescriptor) IsClientStreaming() bool {
+	return md.proto.GetClientStreaming()
+}
+
+// GetInputType returns the input type, or request type, of the RPC method.
+func (md *MethodDescriptor) GetInputType() *MessageDescriptor {
+	return md.inType
+}
+
+// GetOutputType returns the output type, or response type, of the RPC method.
+func (md *MethodDescriptor) GetOutputType() *MessageDescriptor {
+	return md.outType
+}
+
+// OneOfDescriptor describes a one-of field set declared in a protocol buffer message.
+type OneOfDescriptor struct {
+	proto          *dpb.OneofDescriptorProto
+	parent         *MessageDescriptor
+	file           *FileDescriptor
+	choices        []*FieldDescriptor
+	fqn            string
+	sourceInfoPath []int32
+}
+
+func createOneOfDescriptor(fd *FileDescriptor, parent *MessageDescriptor, index int, enclosing string, od *dpb.OneofDescriptorProto) (*OneOfDescriptor, string) {
+	oneOfName := merge(enclosing, od.GetName())
+	ret := &OneOfDescriptor{proto: od, parent: parent, file: fd, fqn: oneOfName}
+	for _, f := range parent.fields {
+		oi := f.proto.OneofIndex
+		if oi != nil && *oi == int32(index) {
+			f.oneOf = ret
+			ret.choices = append(ret.choices, f)
+		}
+	}
+	return ret, oneOfName
+}
+
+func (od *OneOfDescriptor) resolve(path []int32) {
+	od.sourceInfoPath = append([]int32(nil), path...) // defensive copy
+}
+
+// GetName returns the name of the one-of.
+func (od *OneOfDescriptor) GetName() string {
+	return od.proto.GetName()
+}
+
+// GetFullyQualifiedName returns the fully qualified name of the one-of. Unlike
+// GetName, this includes the fully qualified name of the enclosing message.
+func (od *OneOfDescriptor) GetFullyQualifiedName() string {
+	return od.fqn
+}
+
+// GetParent returns the descriptor for the message in which this one-of is
+// defined. Most usages will prefer to use GetOwner, which has a concrete
+// return type. This more generic method is present to satisfy the Descriptor
+// interface.
+func (od *OneOfDescriptor) GetParent() Descriptor {
+	return od.parent
+}
+
+// GetOwner returns the message to which this one-of field set belongs.
+func (od *OneOfDescriptor) GetOwner() *MessageDescriptor {
+	return od.parent
+}
+
+// GetFile returns the descriptor for the file in which this one-of is defined.
+func (od *OneOfDescriptor) GetFile() *FileDescriptor {
+	return od.file
+}
+
+// GetOptions returns the one-of's options. Most usages will be more interested
+// in GetOneOfOptions, which has a concrete return type. This generic version
+// is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) GetOptions() proto.Message {
+	return od.proto.GetOptions()
+}
+
+// GetOneOfOptions returns the one-of's options.
+func (od *OneOfDescriptor) GetOneOfOptions() *dpb.OneofOptions {
+	return od.proto.GetOptions()
+}
+
+// GetSourceInfo returns source info for the one-of, if present in the
+// descriptor. Not all descriptors will contain source info. If non-nil, the
+// returned info contains information about the location in the file where the
+// one-of was defined and also contains comments associated with the one-of
+// definition.
+func (od *OneOfDescriptor) GetSourceInfo() *dpb.SourceCodeInfo_Location {
+	return od.file.sourceInfo.Get(od.sourceInfoPath)
+}
+
+// AsProto returns the underlying descriptor proto. Most usages will be more
+// interested in AsOneofDescriptorProto, which has a concrete return type. This
+// generic version is present to satisfy the Descriptor interface.
+func (od *OneOfDescriptor) AsProto() proto.Message {
+	return od.proto
+}
+
+// AsOneofDescriptorProto returns the underlying descriptor proto.
+func (od *OneOfDescriptor) AsOneofDescriptorProto() *dpb.OneofDescriptorProto {
+	return od.proto
+}
+
+// String returns the underlying descriptor proto, in compact text format.
+func (od *OneOfDescriptor) String() string {
+	return od.proto.String()
+}
+
+// GetChoices returns the fields that are part of the one-of field set. At most one of
+// these fields may be set for a given message.
+func (od *OneOfDescriptor) GetChoices() []*FieldDescriptor {
+	return od.choices
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared.
+type scope func(string) Descriptor
+
+func fileScope(fd *FileDescriptor) scope {
+	// we search symbols in this file, but also symbols in other files that have
+	// the same package as this file or a "parent" package (in protobuf,
+	// packages are a hierarchy like C++ namespaces)
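+	// For example, in a file whose package is "foo.bar", an unqualified
+	// reference "Baz" is tried as "foo.bar.Baz", then "foo.Baz", then "Baz".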
+	prefixes := internal.CreatePrefixList(fd.proto.GetPackage())
+	return func(name string) Descriptor {
+		for _, prefix := range prefixes {
+			n := merge(prefix, name)
+			d := findSymbol(fd, n, false)
+			if d != nil {
+				return d
+			}
+		}
+		return nil
+	}
+}
+
+func messageScope(md *MessageDescriptor) scope {
+	return func(name string) Descriptor {
+		n := merge(md.fqn, name)
+		if d, ok := md.file.symbols[n]; ok {
+			return d
+		}
+		return nil
+	}
+}
+
+func resolve(fd *FileDescriptor, name string, scopes []scope) (Descriptor, error) {
+	if strings.HasPrefix(name, ".") {
+		// already fully-qualified
+		d := findSymbol(fd, name[1:], false)
+		if d != nil {
+			return d, nil
+		}
+	} else {
+		// unqualified, so we look in the enclosing (last) scope first and move
+		// towards outermost (first) scope, trying to resolve the symbol
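+		// e.g. for a reference inside a nested message, the innermost message
+		// scope is consulted first, then its enclosing messages, then the file scope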
+		for i := len(scopes) - 1; i >= 0; i-- {
+			d := scopes[i](name)
+			if d != nil {
+				return d, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("file %q included an unresolvable reference to %q", fd.proto.GetName(), name)
+}
+
+func findSymbol(fd *FileDescriptor, name string, public bool) Descriptor {
+	d := fd.symbols[name]
+	if d != nil {
+		return d
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
+	var deps []*FileDescriptor
+	if public {
+		deps = fd.publicDeps
+	} else {
+		deps = fd.deps
+	}
+	for _, dep := range deps {
+		d = findSymbol(dep, name, true)
+		if d != nil {
+			return d
+		}
+	}
+
+	return nil
+}
+
+func merge(a, b string) string {
+	if a == "" {
+		return b
+	} else {
+		return a + "." + b
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
new file mode 100644
index 0000000..d8e2df0
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_no_unsafe.go
@@ -0,0 +1,30 @@
+//+build appengine
+// TODO: other build tags for environments where unsafe package is inappropriate
+
+package desc
+
+type jsonNameMap struct{}
+type memoizedDefault struct{}
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: When use of the unsafe package is allowed, we use it to atomically
+	// build an index via atomic.LoadPointer/atomic.StorePointer. Without it, we
+	// skip the index and do a linear scan of the fields on each call.
+	for _, f := range md.fields {
+		jn := f.proto.GetJsonName()
+		if jn == "" {
+			jn = f.proto.GetName()
+		}
+		if jn == jsonName {
+			return f
+		}
+	}
+	return nil
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	return fd.determineDefault()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
new file mode 100644
index 0000000..6ff872f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/descriptor_unsafe.go
@@ -0,0 +1,59 @@
+//+build !appengine
+// TODO: exclude other build tags for environments where unsafe package is inappropriate
+
+package desc
+
+import (
+	"sync/atomic"
+	"unsafe"
+)
+
+type jsonNameMap map[string]*FieldDescriptor // loaded/stored atomically via atomic+unsafe
+type memoizedDefault *interface{}            // loaded/stored atomically via atomic+unsafe
+
+// FindFieldByJSONName finds the field with the given JSON field name. If no such
+// field exists then nil is returned. Only regular fields are returned, not
+// extensions.
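+//
+// For example, a field declared as "string foo_bar = 1;" would typically be
+// found via FindFieldByJSONName("fooBar"), since protoc records "fooBar" as
+// the field's JSON name; if the descriptor carries no JSON name, the declared
+// name is used as the lookup key instead.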
+func (md *MessageDescriptor) FindFieldByJSONName(jsonName string) *FieldDescriptor {
+	// NB: We don't want to eagerly index JSON names because many programs won't use it.
+	// So we want to do it lazily, but also make sure the result is thread-safe. So we
+	// atomically load/store the map as if it were a normal pointer. We don't use other
+	// mechanisms -- like sync.Mutex, sync.RWMutex, sync.Once, or atomic.Value -- to
+	// do this lazily because those types cannot be copied, and we'd rather not induce
+	// 'go vet' errors in programs that use descriptors and try to copy them.
+	// If multiple goroutines try to access the index at the same time, before it is
+	// built, they will all end up computing the index redundantly. Future reads of
+	// the index will use whatever was the "last one stored" by those racing goroutines.
+	// Since building the index is deterministic, this is fine: all indices computed
+	// will be the same.
+	addrOfJsonNames := (*unsafe.Pointer)(unsafe.Pointer(&md.jsonNames))
+	jsonNames := atomic.LoadPointer(addrOfJsonNames)
+	var index map[string]*FieldDescriptor
+	if jsonNames == nil {
+		// slow path: compute the index
+		index = map[string]*FieldDescriptor{}
+		for _, f := range md.fields {
+			jn := f.proto.GetJsonName()
+			if jn == "" {
+				jn = f.proto.GetName()
+			}
+			index[jn] = f
+		}
+		atomic.StorePointer(addrOfJsonNames, *(*unsafe.Pointer)(unsafe.Pointer(&index)))
+	} else {
+		*(*unsafe.Pointer)(unsafe.Pointer(&index)) = jsonNames
+	}
+	return index[jsonName]
+}
+
+func (fd *FieldDescriptor) getDefaultValue() interface{} {
+	addrOfDef := (*unsafe.Pointer)(unsafe.Pointer(&fd.def))
+	def := atomic.LoadPointer(addrOfDef)
+	if def != nil {
+		return *(*interface{})(def)
+	}
+	// slow path: compute the default, potentially involves decoding value
+	d := fd.determineDefault()
+	atomic.StorePointer(addrOfDef, (unsafe.Pointer(&d)))
+	return d
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/doc.go b/vendor/github.com/jhump/protoreflect/desc/doc.go
new file mode 100644
index 0000000..1740dce
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/doc.go
@@ -0,0 +1,41 @@
+// Package desc contains "rich descriptors" for protocol buffers. The built-in
+// descriptor types are simple protobuf messages, each one representing a
+// different kind of element in the AST of a .proto source file.
+//
+// Because of this inherent "tree" quality, these built-in descriptors cannot
+// refer to their enclosing file descriptor. Nor can a field descriptor refer to
+// a message or enum descriptor that represents the field's type (for enum and
+// nested message fields). All such links must instead be stringly typed. This
+// limitation makes them much harder to use for doing interesting things with
+// reflection.
+//
+// Without this package, resolving references to types is particularly complex.
+// For example, resolving a field's type, the message type an extension extends,
+// or the request and response types of an RPC method all require searching
+// through symbols defined not only in the file in which these elements are
+// declared but also in its transitive closure of dependencies.
+//
+// "Rich descriptors" avoid the need to deal with the complexities described
+// above. A rich descriptor has all type references resolved and provides
+// methods to access other rich descriptors for all referenced elements. Each
+// rich descriptor has a usefully broad API, but does not try to mimic the full
+// interface of the underlying descriptor proto. Instead, every rich descriptor
+// provides access to that underlying proto, for extracting descriptor
+// properties that are not immediately accessible through the rich descriptor's
+// methods.
+//
+// Rich descriptors can be accessed in similar ways as their "poor" cousins
+// (descriptor protos). Instead of using proto.FileDescriptor, use
+// desc.LoadFileDescriptor. Message descriptors and extension field descriptors
+// can also be easily accessed using desc.LoadMessageDescriptor and
+// desc.LoadFieldDescriptorForExtension, respectively.
+//
+// It is also possible to create rich descriptors for proto messages that a given
+// Go program doesn't even know about. For example, they could be loaded from a
+// FileDescriptorSet file (which can be generated by protoc) or loaded from a
+// server. This enables interesting things like dynamic clients, where a Go
+// program can be an RPC client of a service it wasn't compiled to know about.
+//
+// Also see the grpcreflect, dynamic, and grpcdynamic packages in this same
+// repo to see just how useful rich descriptors really are.
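+//
+// A minimal sketch of typical usage (the message name "acme.User" here is
+// hypothetical and only for illustration):
+//
+//    md, err := desc.LoadMessageDescriptor("acme.User")
+//    if err != nil || md == nil {
+//        // handle the error or unknown message
+//    }
+//    for _, fld := range md.GetFields() {
+//        fmt.Println(fld.GetName(), fld.GetType())
+//    }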
+package desc
diff --git a/vendor/github.com/jhump/protoreflect/desc/imports.go b/vendor/github.com/jhump/protoreflect/desc/imports.go
new file mode 100644
index 0000000..caf3277
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/imports.go
@@ -0,0 +1,306 @@
+package desc
+
+import (
+	"fmt"
+	"path/filepath"
+	"reflect"
+	"strings"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+var (
+	globalImportPathConf map[string]string
+	globalImportPathMu   sync.RWMutex
+)
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path. For more details on why alternate import paths may need to
+// be configured, see ImportResolver.
+//
+// This method panics if provided invalid input. An empty importPath is invalid.
+// An un-registered registerPath is also invalid. For example, if an attempt is
+// made to register the import path "foo/bar.proto" as "bar.proto", but there is
+// no "bar.proto" registered in the Go protobuf runtime, this method will panic.
+// This method also panics if an attempt is made to register the same import
+// path more than once.
+//
+// This function works globally, applying to all descriptors loaded by this
+// package. If you instead want more granular support for handling alternate
+// import paths -- such as for a single invocation of a function in this
+// package or when the alternate path is only used from one file (so you don't
+// want the alternate path used when loading every other file), use an
+// ImportResolver instead.
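+//
+// For example, if "bar.proto" is registered in the Go protobuf runtime under
+// that bare name but other files import it as "foo/bar.proto", the mismatch
+// could be registered like so (the paths are purely illustrative):
+//    desc.RegisterImportPath("bar.proto", "foo/bar.proto")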
+func RegisterImportPath(registerPath, importPath string) {
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	desc := proto.FileDescriptor(registerPath)
+	if len(desc) == 0 {
+		panic(fmt.Sprintf("path %q is not a registered proto file", registerPath))
+	}
+	globalImportPathMu.Lock()
+	defer globalImportPathMu.Unlock()
+	if reg := globalImportPathConf[importPath]; reg != "" {
+		panic(fmt.Sprintf("import path %q already registered for %s", importPath, reg))
+	}
+	if globalImportPathConf == nil {
+		globalImportPathConf = map[string]string{}
+	}
+	globalImportPathConf[importPath] = registerPath
+}
+
+// ResolveImport resolves the given import path. If it has been registered as an
+// alternate via RegisterImportPath, the registered path is returned. Otherwise,
+// the given import path is returned unchanged.
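+//
+// For example, after a call to RegisterImportPath("bar.proto", "foo/bar.proto"),
+// ResolveImport("foo/bar.proto") returns "bar.proto", while a path with no
+// registered alternate is returned as-is.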
+func ResolveImport(importPath string) string {
+	importPath = clean(importPath)
+	globalImportPathMu.RLock()
+	defer globalImportPathMu.RUnlock()
+	reg := globalImportPathConf[importPath]
+	if reg == "" {
+		return importPath
+	}
+	return reg
+}
+
+// ImportResolver lets you work-around linking issues that are caused by
+// mismatches between how a particular proto source file is registered in the Go
+// protobuf runtime and how that same file is imported by other files. The file
+// is registered using the same relative path given to protoc when the file is
+// compiled (i.e. when Go code is generated). So if any file tries to import
+// that source file, but using a different relative path, then a link error will
+// occur when this package tries to load a descriptor for the importing file.
+//
+// For example, let's say we have two proto source files: "foo/bar.proto" and
+// "fubar/baz.proto". The latter imports the former using a line like so:
+//    import "foo/bar.proto";
+// However, when protoc is invoked, the command-line args look like so:
+//    protoc -Ifoo/ --go_out=foo/ bar.proto
+//    protoc -I./ -Ifubar/ --go_out=fubar/ baz.proto
+// Because the paths given to protoc are just "bar.proto" and "baz.proto", this is
+// how they are registered in the Go protobuf runtime. So, when loading the
+// descriptor for "fubar/baz.proto", we'll see an import path of "foo/bar.proto"
+// but will find no file registered with that path:
+//    fd, err := desc.LoadFileDescriptor("baz.proto")
+//    // err will be non-nil, complaining that there is no such file
+//    // found named "foo/bar.proto"
+//
+// This can be remedied by registering alternate import paths using an
+// ImportResolver. Continuing with the example above, the code below would fix
+// any link issue:
+//    var r desc.ImportResolver
+//    r.RegisterImportPath("bar.proto", "foo/bar.proto")
+//    fd, err := r.LoadFileDescriptor("baz.proto")
+//    // err will be nil; descriptor successfully loaded!
+//
+// If there are files that are *always* imported using a different relative
+// path than how they are registered, consider using the global
+// RegisterImportPath function, so you don't have to use an ImportResolver for
+// every file that imports it.
+type ImportResolver struct {
+	children    map[string]*ImportResolver
+	importPaths map[string]string
+
+	// By default, an ImportResolver will fall back to consulting any paths
+	// registered via the top-level RegisterImportPath function. Setting this
+	// field to true will cause the ImportResolver to skip that fallback and
+	// only examine its own locally registered paths.
+	SkipFallbackRules bool
+}
+
+// ResolveImport resolves the given import path in the context of the given
+// source file. If a matching alternate has been registered with this resolver
+// via a call to RegisterImportPath or RegisterImportPathFrom, then the
+// registered path is returned. Otherwise, the given import path is returned
+// unchanged.
+func (r *ImportResolver) ResolveImport(source, importPath string) string {
+	if r != nil {
+		res := r.resolveImport(clean(source), clean(importPath))
+		if res != "" {
+			return res
+		}
+		if r.SkipFallbackRules {
+			return importPath
+		}
+	}
+	return ResolveImport(importPath)
+}
+
+func (r *ImportResolver) resolveImport(source, importPath string) string {
+	if source == "" {
+		return r.importPaths[importPath]
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch != nil {
+		if reg := ch.resolveImport(cdr, importPath); reg != "" {
+			return reg
+		}
+	}
+	return r.importPaths[importPath]
+}
+
+// RegisterImportPath registers an alternate import path for a given registered
+// proto file path with this resolver. Any appearance of the given import path
+// when linking files will instead try to link the given registered path. If the
+// registered path cannot be located, then linking will fall back to the actual
+// imported path.
+//
+// This method will panic if given an empty path or if the same import path is
+// registered more than once.
+//
+// To constrain the contexts where the given import path is to be re-written,
+// use RegisterImportPathFrom instead.
+func (r *ImportResolver) RegisterImportPath(registerPath, importPath string) {
+	r.RegisterImportPathFrom(registerPath, importPath, "")
+}
+
+// RegisterImportPathFrom registers an alternate import path for a given
+// registered proto file path with this resolver, but only for imports in the
+// specified source context.
+//
+// The source context can be the name of a folder or a proto source file. Any
+// appearance of the given import path in that context will instead try to link
+// the given registered path. To be in context, the file that is being linked
+// (i.e. the one whose import statement is being resolved) must have the same
+// relative path as the source context or be a sub-path (i.e. a descendant of
+// the source folder).
+//
+// If the registered path cannot be located, then linking will fall back to the
+// actual imported path.
+//
+// This method will panic if given an empty path. The source context, on the
+// other hand, is allowed to be blank. A blank source matches all files. This
+// method also panics if the same import path is registered in the same source
+// context more than once.
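+//
+// For example, to apply the rewrite only to files under a hypothetical
+// "protos/team-a" folder:
+//    var r desc.ImportResolver
+//    r.RegisterImportPathFrom("bar.proto", "foo/bar.proto", "protos/team-a")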
+func (r *ImportResolver) RegisterImportPathFrom(registerPath, importPath, source string) {
+	importPath = clean(importPath)
+	if len(importPath) == 0 {
+		panic("import path cannot be empty")
+	}
+	registerPath = clean(registerPath)
+	if len(registerPath) == 0 {
+		panic("registered path cannot be empty")
+	}
+	r.registerImportPathFrom(registerPath, importPath, clean(source))
+}
+
+func (r *ImportResolver) registerImportPathFrom(registerPath, importPath, source string) {
+	if source == "" {
+		if r.importPaths == nil {
+			r.importPaths = map[string]string{}
+		} else if reg := r.importPaths[importPath]; reg != "" {
+			panic(fmt.Sprintf("already registered import path %q as %q", importPath, registerPath))
+		}
+		r.importPaths[importPath] = registerPath
+		return
+	}
+	var car, cdr string
+	idx := strings.IndexRune(source, filepath.Separator)
+	if idx < 0 {
+		car, cdr = source, ""
+	} else {
+		car, cdr = source[:idx], source[idx+1:]
+	}
+	ch := r.children[car]
+	if ch == nil {
+		if r.children == nil {
+			r.children = map[string]*ImportResolver{}
+		}
+		ch = &ImportResolver{}
+		r.children[car] = ch
+	}
+	ch.registerImportPathFrom(registerPath, importPath, cdr)
+}
+
+// LoadFileDescriptor is the same as the package function of the same name, but
+// any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) LoadFileDescriptor(filePath string) (*FileDescriptor, error) {
+	return loadFileDescriptor(filePath, r)
+}
+
+// LoadMessageDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking
+// files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptor(msgName string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(msgName, r)
+}
+
+// LoadMessageDescriptorForMessage is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForMessage(msg proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(msg, r)
+}
+
+// LoadMessageDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadMessageDescriptorForType(msgType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(msgType, r)
+}
+
+// LoadEnumDescriptorForEnum is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, r)
+}
+
+// LoadEnumDescriptorForType is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, r)
+}
+
+// LoadFieldDescriptorForExtension is the same as the package function of the
+// same name, but any alternate paths configured in this resolver are used when
+// linking files for the returned descriptor.
+func (r *ImportResolver) LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, r)
+}
+
+// CreateFileDescriptor is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor proto.
+func (r *ImportResolver) CreateFileDescriptor(fdp *dpb.FileDescriptorProto, deps ...*FileDescriptor) (*FileDescriptor, error) {
+	return createFileDescriptor(fdp, deps, r)
+}
+
+// CreateFileDescriptors is the same as the package function of the same name,
+// but any alternate paths configured in this resolver are used when linking the
+// given descriptor protos.
+func (r *ImportResolver) CreateFileDescriptors(fds []*dpb.FileDescriptorProto) (map[string]*FileDescriptor, error) {
+	return createFileDescriptors(fds, r)
+}
+
+// CreateFileDescriptorFromSet is the same as the package function of the same
+// name, but any alternate paths configured in this resolver are used when
+// linking the descriptor protos in the given set.
+func (r *ImportResolver) CreateFileDescriptorFromSet(fds *dpb.FileDescriptorSet) (*FileDescriptor, error) {
+	return createFileDescriptorFromSet(fds, r)
+}
+
+const dotPrefix = "." + string(filepath.Separator)
+
+func clean(path string) string {
+	if path == "" {
+		return ""
+	}
+	path = filepath.Clean(path)
+	if path == "." {
+		return ""
+	}
+	return strings.TrimPrefix(path, dotPrefix)
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
new file mode 100644
index 0000000..4d7dbae
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/source_info.go
@@ -0,0 +1,96 @@
+package internal
+
+import (
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// SourceInfoMap is a map of paths in a descriptor to the corresponding source
+// code info.
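+//
+// For example, the location of the first message declared in a file (if
+// recorded) is stored under the path [File_messagesTag, 0], so it can be
+// fetched with m.Get([]int32{File_messagesTag, 0}).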
+type SourceInfoMap map[string]*dpb.SourceCodeInfo_Location
+
+// Get returns the source code info for the given path.
+func (m SourceInfoMap) Get(path []int32) *dpb.SourceCodeInfo_Location {
+	return m[asMapKey(path)]
+}
+
+// Put stores the given source code info for the given path.
+func (m SourceInfoMap) Put(path []int32, loc *dpb.SourceCodeInfo_Location) {
+	m[asMapKey(path)] = loc
+}
+
+// PutIfAbsent stores the given source code info for the given path only if the
+// given path does not exist in the map. This method returns true when the value
+// is stored, false if the path already exists.
+func (m SourceInfoMap) PutIfAbsent(path []int32, loc *dpb.SourceCodeInfo_Location) bool {
+	k := asMapKey(path)
+	if _, ok := m[k]; ok {
+		return false
+	}
+	m[k] = loc
+	return true
+}
+
+func asMapKey(slice []int32) string {
+	// NB: arrays should be usable as map keys, but this does not
+	// work due to a bug: https://github.com/golang/go/issues/22605
+	//rv := reflect.ValueOf(slice)
+	//arrayType := reflect.ArrayOf(rv.Len(), rv.Type().Elem())
+	//array := reflect.New(arrayType).Elem()
+	//reflect.Copy(array, rv)
+	//return array.Interface()
+
+	b := make([]byte, len(slice)*4)
+	for i, s := range slice {
+		j := i * 4
+		b[j] = byte(s)
+		b[j+1] = byte(s >> 8)
+		b[j+2] = byte(s >> 16)
+		b[j+3] = byte(s >> 24)
+	}
+	return string(b)
+}
+
+// CreateSourceInfoMap constructs a new SourceInfoMap and populates it with the
+// source code info in the given file descriptor proto.
+func CreateSourceInfoMap(fd *dpb.FileDescriptorProto) SourceInfoMap {
+	res := SourceInfoMap{}
+	PopulateSourceInfoMap(fd, res)
+	return res
+}
+
+// PopulateSourceInfoMap populates the given SourceInfoMap with information from
+// the given file descriptor.
+func PopulateSourceInfoMap(fd *dpb.FileDescriptorProto, m SourceInfoMap) {
+	for _, l := range fd.GetSourceCodeInfo().GetLocation() {
+		m.Put(l.Path, l)
+	}
+}
+
+// NB: This wonkiness allows desc.Descriptor implementations to implement an interface that
+// is only usable from this package, by embedding a SourceInfoComputeFunc that
+// implements the actual logic (which must live in desc package to avoid a
+// dependency cycle).
+
+// SourceInfoComputer is an interface with a single method that is invoked to recompute
+// source info. This is needed for the protoparse package, which needs to link
+// descriptors without source info in order to interpret options, but then needs
+// to re-compute source info after that interpretation so that final linked
+// descriptors expose the right info.
+type SourceInfoComputer interface {
+	recomputeSourceInfo()
+}
+
+// SourceInfoComputeFunc is the type that a desc.Descriptor will embed. It will
+// be aliased in the desc package to an unexported name so it is not marked as
+// an exported field in reflection and not present in Go docs.
+type SourceInfoComputeFunc func()
+
+func (f SourceInfoComputeFunc) recomputeSourceInfo() {
+	f()
+}
+
+// RecomputeSourceInfo is used to initiate recomputation of source info. This is
+// used by the protoparse package, after it interprets options.
+func RecomputeSourceInfo(c SourceInfoComputer) {
+	c.recomputeSourceInfo()
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/internal/util.go b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
new file mode 100644
index 0000000..d5197f1
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/internal/util.go
@@ -0,0 +1,267 @@
+package internal
+
+import (
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	// MaxTag is the maximum allowed tag number for a field.
+	MaxTag = 536870911 // 2^29 - 1
+
+	// SpecialReservedStart is the first tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedStart = 19000
+	// SpecialReservedEnd is the last tag in a range that is reserved and not
+	// allowed for use in message definitions.
+	SpecialReservedEnd = 19999
+
+	// NB: It would be nice to use constants from generated code instead of
+	// hard-coding these here. But code-gen does not emit these as constants
+	// anywhere. The only places they appear in generated code are struct tags
+	// on fields of the generated descriptor protos.
+
+	// File_packageTag is the tag number of the package element in a file
+	// descriptor proto.
+	File_packageTag = 2
+	// File_dependencyTag is the tag number of the dependencies element in a
+	// file descriptor proto.
+	File_dependencyTag = 3
+	// File_messagesTag is the tag number of the messages element in a file
+	// descriptor proto.
+	File_messagesTag = 4
+	// File_enumsTag is the tag number of the enums element in a file descriptor
+	// proto.
+	File_enumsTag = 5
+	// File_servicesTag is the tag number of the services element in a file
+	// descriptor proto.
+	File_servicesTag = 6
+	// File_extensionsTag is the tag number of the extensions element in a file
+	// descriptor proto.
+	File_extensionsTag = 7
+	// File_optionsTag is the tag number of the options element in a file
+	// descriptor proto.
+	File_optionsTag = 8
+	// File_syntaxTag is the tag number of the syntax element in a file
+	// descriptor proto.
+	File_syntaxTag = 12
+	// Message_nameTag is the tag number of the name element in a message
+	// descriptor proto.
+	Message_nameTag = 1
+	// Message_fieldsTag is the tag number of the fields element in a message
+	// descriptor proto.
+	Message_fieldsTag = 2
+	// Message_nestedMessagesTag is the tag number of the nested messages
+	// element in a message descriptor proto.
+	Message_nestedMessagesTag = 3
+	// Message_enumsTag is the tag number of the enums element in a message
+	// descriptor proto.
+	Message_enumsTag = 4
+	// Message_extensionRangeTag is the tag number of the extension ranges
+	// element in a message descriptor proto.
+	Message_extensionRangeTag = 5
+	// Message_extensionsTag is the tag number of the extensions element in a
+	// message descriptor proto.
+	Message_extensionsTag = 6
+	// Message_optionsTag is the tag number of the options element in a message
+	// descriptor proto.
+	Message_optionsTag = 7
+	// Message_oneOfsTag is the tag number of the one-ofs element in a message
+	// descriptor proto.
+	Message_oneOfsTag = 8
+	// Message_reservedRangeTag is the tag number of the reserved ranges element
+	// in a message descriptor proto.
+	Message_reservedRangeTag = 9
+	// Message_reservedNameTag is the tag number of the reserved names element
+	// in a message descriptor proto.
+	Message_reservedNameTag = 10
+	// ExtensionRange_startTag is the tag number of the start index in an
+	// extension range proto.
+	ExtensionRange_startTag = 1
+	// ExtensionRange_endTag is the tag number of the end index in an
+	// extension range proto.
+	ExtensionRange_endTag = 2
+	// ExtensionRange_optionsTag is the tag number of the options element in an
+	// extension range proto.
+	ExtensionRange_optionsTag = 3
+	// ReservedRange_startTag is the tag number of the start index in a reserved
+	// range proto.
+	ReservedRange_startTag = 1
+	// ReservedRange_endTag is the tag number of the end index in a reserved
+	// range proto.
+	ReservedRange_endTag = 2
+	// Field_nameTag is the tag number of the name element in a field descriptor
+	// proto.
+	Field_nameTag = 1
+	// Field_extendeeTag is the tag number of the extendee element in a field
+	// descriptor proto.
+	Field_extendeeTag = 2
+	// Field_numberTag is the tag number of the number element in a field
+	// descriptor proto.
+	Field_numberTag = 3
+	// Field_labelTag is the tag number of the label element in a field
+	// descriptor proto.
+	Field_labelTag = 4
+	// Field_typeTag is the tag number of the type element in a field descriptor
+	// proto.
+	Field_typeTag = 5
+	// Field_defaultTag is the tag number of the default value element in a
+	// field descriptor proto.
+	Field_defaultTag = 7
+	// Field_optionsTag is the tag number of the options element in a field
+	// descriptor proto.
+	Field_optionsTag = 8
+	// Field_jsonNameTag is the tag number of the JSON name element in a field
+	// descriptor proto.
+	Field_jsonNameTag = 10
+	// OneOf_nameTag is the tag number of the name element in a one-of
+	// descriptor proto.
+	OneOf_nameTag = 1
+	// OneOf_optionsTag is the tag number of the options element in a one-of
+	// descriptor proto.
+	OneOf_optionsTag = 2
+	// Enum_nameTag is the tag number of the name element in an enum descriptor
+	// proto.
+	Enum_nameTag = 1
+	// Enum_valuesTag is the tag number of the values element in an enum
+	// descriptor proto.
+	Enum_valuesTag = 2
+	// Enum_optionsTag is the tag number of the options element in an enum
+	// descriptor proto.
+	Enum_optionsTag = 3
+	// Enum_reservedRangeTag is the tag number of the reserved ranges element in
+	// an enum descriptor proto.
+	Enum_reservedRangeTag = 4
+	// Enum_reservedNameTag is the tag number of the reserved names element in
+	// an enum descriptor proto.
+	Enum_reservedNameTag = 5
+	// EnumVal_nameTag is the tag number of the name element in an enum value
+	// descriptor proto.
+	EnumVal_nameTag = 1
+	// EnumVal_numberTag is the tag number of the number element in an enum
+	// value descriptor proto.
+	EnumVal_numberTag = 2
+	// EnumVal_optionsTag is the tag number of the options element in an enum
+	// value descriptor proto.
+	EnumVal_optionsTag = 3
+	// Service_nameTag is the tag number of the name element in a service
+	// descriptor proto.
+	Service_nameTag = 1
+	// Service_methodsTag is the tag number of the methods element in a service
+	// descriptor proto.
+	Service_methodsTag = 2
+	// Service_optionsTag is the tag number of the options element in a service
+	// descriptor proto.
+	Service_optionsTag = 3
+	// Method_nameTag is the tag number of the name element in a method
+	// descriptor proto.
+	Method_nameTag = 1
+	// Method_inputTag is the tag number of the input type element in a method
+	// descriptor proto.
+	Method_inputTag = 2
+	// Method_outputTag is the tag number of the output type element in a method
+	// descriptor proto.
+	Method_outputTag = 3
+	// Method_optionsTag is the tag number of the options element in a method
+	// descriptor proto.
+	Method_optionsTag = 4
+	// Method_inputStreamTag is the tag number of the input stream flag in a
+	// method descriptor proto.
+	Method_inputStreamTag = 5
+	// Method_outputStreamTag is the tag number of the output stream flag in a
+	// method descriptor proto.
+	Method_outputStreamTag = 6
+
+	// UninterpretedOptionsTag is the tag number of the uninterpreted options
+	// element. All *Options messages use the same tag for the field that stores
+	// uninterpreted options.
+	UninterpretedOptionsTag = 999
+
+	// Uninterpreted_nameTag is the tag number of the name element in an
+	// uninterpreted options proto.
+	Uninterpreted_nameTag = 2
+	// Uninterpreted_identTag is the tag number of the identifier value in an
+	// uninterpreted options proto.
+	Uninterpreted_identTag = 3
+	// Uninterpreted_posIntTag is the tag number of the positive int value in an
+	// uninterpreted options proto.
+	Uninterpreted_posIntTag = 4
+	// Uninterpreted_negIntTag is the tag number of the negative int value in an
+	// uninterpreted options proto.
+	Uninterpreted_negIntTag = 5
+	// Uninterpreted_doubleTag is the tag number of the double value in an
+	// uninterpreted options proto.
+	Uninterpreted_doubleTag = 6
+	// Uninterpreted_stringTag is the tag number of the string value in an
+	// uninterpreted options proto.
+	Uninterpreted_stringTag = 7
+	// Uninterpreted_aggregateTag is the tag number of the aggregate value in an
+	// uninterpreted options proto.
+	Uninterpreted_aggregateTag = 8
+	// UninterpretedName_nameTag is the tag number of the name element in an
+	// uninterpreted option name proto.
+	UninterpretedName_nameTag = 1
+)
+
+// JsonName returns the default JSON name for a field with the given name.
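+// For example, JsonName("foo_bar_baz") returns "fooBarBaz".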
+func JsonName(name string) string {
+	var js []rune
+	nextUpper := false
+	for i, r := range name {
+		if r == '_' {
+			nextUpper = true
+			continue
+		}
+		if i == 0 {
+			js = append(js, r)
+		} else if nextUpper {
+			nextUpper = false
+			js = append(js, unicode.ToUpper(r))
+		} else {
+			js = append(js, r)
+		}
+	}
+	return string(js)
+}
+
+// InitCap returns the given field name, but with the first letter capitalized.
+func InitCap(name string) string {
+	r, sz := utf8.DecodeRuneInString(name)
+	return string(unicode.ToUpper(r)) + name[sz:]
+}
+
+// CreatePrefixList returns a list of package prefixes to search when resolving
+// a symbol name. If the given package is blank, it returns only the empty
+// string. If the given package contains only one token, e.g. "foo", it returns
+// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
+// successively shorter prefixes of the package and then the empty string. For
+// example, for a package named "foo.bar.baz" it will return the following list:
+//   ["foo.bar.baz", "foo.bar", "foo", ""]
+func CreatePrefixList(pkg string) []string {
+	if pkg == "" {
+		return []string{""}
+	}
+
+	numDots := 0
+	// one pass to pre-allocate the returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			numDots++
+		}
+	}
+	if numDots == 0 {
+		return []string{pkg, ""}
+	}
+
+	prefixes := make([]string, numDots+2)
+	// second pass to fill in returned slice
+	for i := 0; i < len(pkg); i++ {
+		if pkg[i] == '.' {
+			prefixes[numDots] = pkg[:i]
+			numDots--
+		}
+	}
+	prefixes[0] = pkg
+
+	return prefixes
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/load.go b/vendor/github.com/jhump/protoreflect/desc/load.go
new file mode 100644
index 0000000..4a05830
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/load.go
@@ -0,0 +1,341 @@
+package desc
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/internal"
+)
+
+var (
+	cacheMu       sync.RWMutex
+	filesCache    = map[string]*FileDescriptor{}
+	messagesCache = map[string]*MessageDescriptor{}
+	enumCache     = map[reflect.Type]*EnumDescriptor{}
+)
+
+// LoadFileDescriptor creates a file descriptor using the bytes returned by
+// proto.FileDescriptor. Descriptors are cached so that they do not need to be
+// re-processed if the same file is fetched again later.
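+//
+// For example (the path is hypothetical and must match how the file was
+// registered with the Go protobuf runtime, i.e. the path passed to protoc):
+//    fd, err := desc.LoadFileDescriptor("foo/bar.proto")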
+func LoadFileDescriptor(file string) (*FileDescriptor, error) {
+	return loadFileDescriptor(file, nil)
+}
+
+func loadFileDescriptor(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := getFileFromCache(file)
+	if f != nil {
+		return f, nil
+	}
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadFileDescriptorLocked(file, r)
+}
+
+func loadFileDescriptorLocked(file string, r *ImportResolver) (*FileDescriptor, error) {
+	f := filesCache[file]
+	if f != nil {
+		return f, nil
+	}
+	fd, err := internal.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err = toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(file, f)
+	return f, nil
+}
+
+func toFileDescriptorLocked(fd *dpb.FileDescriptorProto, r *ImportResolver) (*FileDescriptor, error) {
+	deps := make([]*FileDescriptor, len(fd.GetDependency()))
+	for i, dep := range fd.GetDependency() {
+		resolvedDep := r.ResolveImport(fd.GetName(), dep)
+		var err error
+		deps[i], err = loadFileDescriptorLocked(resolvedDep, r)
+		if _, ok := err.(internal.ErrNoSuchFile); ok && resolvedDep != dep {
+			// try original path
+			deps[i], err = loadFileDescriptorLocked(dep, r)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return CreateFileDescriptor(fd, deps...)
+}
+
+func getFileFromCache(file string) *FileDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return filesCache[file]
+}
+
+func putCacheLocked(filename string, fd *FileDescriptor) {
+	filesCache[filename] = fd
+	putMessageCacheLocked(fd.messages)
+}
+
+func putMessageCacheLocked(mds []*MessageDescriptor) {
+	for _, md := range mds {
+		messagesCache[md.fqn] = md
+		putMessageCacheLocked(md.nested)
+	}
+}
+
+// interface implemented by generated messages, which all have a Descriptor() method in
+// addition to the methods of proto.Message
+type protoMessage interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// LoadMessageDescriptor loads a descriptor using the encoded descriptor proto returned by
+// Message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptor(message string) (*MessageDescriptor, error) {
+	return loadMessageDescriptor(message, nil)
+}
+
+func loadMessageDescriptor(message string, r *ImportResolver) (*MessageDescriptor, error) {
+	m := getMessageFromCache(message)
+	if m != nil {
+		return m, nil
+	}
+
+	pt := proto.MessageType(message)
+	if pt == nil {
+		return nil, nil
+	}
+	msg, err := messageFromType(pt)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(message, msg, r)
+}
+
+// LoadMessageDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by message.Descriptor() for the given message type. If the given type is not recognized,
+// then a nil descriptor is returned.
+func LoadMessageDescriptorForType(messageType reflect.Type) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForType(messageType, nil)
+}
+
+func loadMessageDescriptorForType(messageType reflect.Type, r *ImportResolver) (*MessageDescriptor, error) {
+	m, err := messageFromType(messageType)
+	if err != nil {
+		return nil, err
+	}
+	return loadMessageDescriptorForMessage(m, r)
+}
+
+// LoadMessageDescriptorForMessage loads a descriptor using the encoded descriptor proto
+// returned by message.Descriptor(). If the given type is not recognized, then a nil
+// descriptor is returned.
+func LoadMessageDescriptorForMessage(message proto.Message) (*MessageDescriptor, error) {
+	return loadMessageDescriptorForMessage(message, nil)
+}
+
+func loadMessageDescriptorForMessage(message proto.Message, r *ImportResolver) (*MessageDescriptor, error) {
+	// efficiently handle dynamic messages
+	type descriptorable interface {
+		GetMessageDescriptor() *MessageDescriptor
+	}
+	if d, ok := message.(descriptorable); ok {
+		return d.GetMessageDescriptor(), nil
+	}
+
+	name := proto.MessageName(message)
+	if name == "" {
+		return nil, nil
+	}
+	m := getMessageFromCache(name)
+	if m != nil {
+		return m, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadMessageDescriptorForTypeLocked(name, message.(protoMessage), nil)
+}
+
+func messageFromType(mt reflect.Type) (protoMessage, error) {
+	if mt.Kind() != reflect.Ptr {
+		mt = reflect.PtrTo(mt)
+	}
+	m, ok := reflect.Zero(mt).Interface().(protoMessage)
+	if !ok {
+		return nil, fmt.Errorf("failed to create message from type: %v", mt)
+	}
+	return m, nil
+}
+
+func loadMessageDescriptorForTypeLocked(name string, message protoMessage, r *ImportResolver) (*MessageDescriptor, error) {
+	m := messagesCache[name]
+	if m != nil {
+		return m, nil
+	}
+
+	fdb, _ := message.Descriptor()
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := toFileDescriptorLocked(fd, r)
+	if err != nil {
+		return nil, err
+	}
+	putCacheLocked(fd.GetName(), f)
+	return f.FindSymbol(name).(*MessageDescriptor), nil
+}
+
+func getMessageFromCache(message string) *MessageDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return messagesCache[message]
+}
+
+// interface implemented by all generated enums
+type protoEnum interface {
+	EnumDescriptor() ([]byte, []int)
+}
+
+// NB: There is no LoadEnumDescriptor that takes a fully-qualified enum name because
+// it is not useful since protoc-gen-go does not expose the name anywhere in generated
+// code or register it in a way that makes it accessible to reflection code. This also
+// means we have to cache enum descriptors differently -- we can only cache them as
+// they are requested, as opposed to caching all enum types whenever a file descriptor
+// is cached. This is because we need to know the generated type of the enums, and we
+// don't know that at the time of caching file descriptors.
+
+// LoadEnumDescriptorForType loads a descriptor using the encoded descriptor proto returned
+// by enum.EnumDescriptor() for the given enum type.
+func LoadEnumDescriptorForType(enumType reflect.Type) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForType(enumType, nil)
+}
+
+func loadEnumDescriptorForType(enumType reflect.Type, r *ImportResolver) (*EnumDescriptor, error) {
+	// we cache descriptors using non-pointer type
+	if enumType.Kind() == reflect.Ptr {
+		enumType = enumType.Elem()
+	}
+	e := getEnumFromCache(enumType)
+	if e != nil {
+		return e, nil
+	}
+	enum, err := enumFromType(enumType)
+	if err != nil {
+		return nil, err
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(enumType, enum, r)
+}
+
+// LoadEnumDescriptorForEnum loads a descriptor using the encoded descriptor proto
+// returned by enum.EnumDescriptor().
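+//
+// For example, for a hypothetical generated enum type FooEnum:
+//    ed, err := desc.LoadEnumDescriptorForEnum(FooEnum(0))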
+func LoadEnumDescriptorForEnum(enum protoEnum) (*EnumDescriptor, error) {
+	return loadEnumDescriptorForEnum(enum, nil)
+}
+
+func loadEnumDescriptorForEnum(enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	et := reflect.TypeOf(enum)
+	// we cache descriptors using non-pointer type
+	if et.Kind() == reflect.Ptr {
+		et = et.Elem()
+		enum = reflect.Zero(et).Interface().(protoEnum)
+	}
+	e := getEnumFromCache(et)
+	if e != nil {
+		return e, nil
+	}
+
+	cacheMu.Lock()
+	defer cacheMu.Unlock()
+	return loadEnumDescriptorForTypeLocked(et, enum, r)
+}
+
+func enumFromType(et reflect.Type) (protoEnum, error) {
+	if et.Kind() != reflect.Int32 {
+		et = reflect.PtrTo(et)
+	}
+	e, ok := reflect.Zero(et).Interface().(protoEnum)
+	if !ok {
+		return nil, fmt.Errorf("failed to create enum from type: %v", et)
+	}
+	return e, nil
+}
+
+func loadEnumDescriptorForTypeLocked(et reflect.Type, enum protoEnum, r *ImportResolver) (*EnumDescriptor, error) {
+	e := enumCache[et]
+	if e != nil {
+		return e, nil
+	}
+
+	fdb, path := enum.EnumDescriptor()
+	name := fmt.Sprintf("%v", et)
+	fd, err := internal.DecodeFileDescriptor(name, fdb)
+	if err != nil {
+		return nil, err
+	}
+	// see if we already have cached "rich" descriptor
+	f, ok := filesCache[fd.GetName()]
+	if !ok {
+		f, err = toFileDescriptorLocked(fd, r)
+		if err != nil {
+			return nil, err
+		}
+		putCacheLocked(fd.GetName(), f)
+	}
+
+	ed := findEnum(f, path)
+	enumCache[et] = ed
+	return ed, nil
+}
+
+func getEnumFromCache(et reflect.Type) *EnumDescriptor {
+	cacheMu.RLock()
+	defer cacheMu.RUnlock()
+	return enumCache[et]
+}
+
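+// findEnum walks the path returned by EnumDescriptor() to locate the enum:
+// a path of length one selects a top-level enum; otherwise the first index
+// selects a top-level message, intermediate indexes select nested messages,
+// and the final index selects the enum within its enclosing message.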
+func findEnum(fd *FileDescriptor, path []int) *EnumDescriptor {
+	if len(path) == 1 {
+		return fd.GetEnumTypes()[path[0]]
+	}
+	md := fd.GetMessageTypes()[path[0]]
+	for _, i := range path[1 : len(path)-1] {
+		md = md.GetNestedMessageTypes()[i]
+	}
+	return md.GetNestedEnumTypes()[path[len(path)-1]]
+}
+
+// LoadFieldDescriptorForExtension loads the field descriptor that corresponds to the given
+// extension description.
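+//
+// An illustrative sketch (foopb.E_MyExtension stands in for an extension
+// descriptor generated by protoc-gen-go; it is not part of this package):
+//
+//	fld, err := desc.LoadFieldDescriptorForExtension(foopb.E_MyExtension)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(fld.GetFullyQualifiedName(), fld.GetNumber())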
+func LoadFieldDescriptorForExtension(ext *proto.ExtensionDesc) (*FieldDescriptor, error) {
+	return loadFieldDescriptorForExtension(ext, nil)
+}
+
+func loadFieldDescriptorForExtension(ext *proto.ExtensionDesc, r *ImportResolver) (*FieldDescriptor, error) {
+	file, err := loadFileDescriptor(ext.Filename, r)
+	if err != nil {
+		return nil, err
+	}
+	field, ok := file.FindSymbol(ext.Name).(*FieldDescriptor)
+	// make sure descriptor agrees with attributes of the ExtensionDesc
+	if !ok || !field.IsExtension() || field.GetOwner().GetFullyQualifiedName() != proto.MessageName(ext.ExtendedType) ||
+		field.GetNumber() != ext.Field {
+		return nil, fmt.Errorf("file descriptor contained unexpected object with name %s", ext.Name)
+	}
+	return field, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
new file mode 100644
index 0000000..2652053
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/.gitignore
@@ -0,0 +1 @@
+y.output
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
new file mode 100644
index 0000000..2499917
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/ast.go
@@ -0,0 +1,1081 @@
+package protoparse
+
+import "fmt"
+
+// This file defines all of the nodes in the proto AST.
+
+// ErrorWithSourcePos is an error about a proto source file that includes information
+// about the location in the file that caused the error.
+type ErrorWithSourcePos struct {
+	Underlying error
+	Pos        *SourcePos
+}
+
+// Error implements the error interface
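+//
+// When full position information is available the message renders as, for
+// example, "foo.proto:3:14: <underlying error>"; if the position is unknown
+// (see unknownPos), only the filename prefix is included.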
+func (e ErrorWithSourcePos) Error() string {
+	if e.Pos.Line <= 0 || e.Pos.Col <= 0 {
+		return fmt.Sprintf("%s: %v", e.Pos.Filename, e.Underlying)
+	}
+	return fmt.Sprintf("%s:%d:%d: %v", e.Pos.Filename, e.Pos.Line, e.Pos.Col, e.Underlying)
+}
+
+// SourcePos identifies a location in a proto source file.
+type SourcePos struct {
+	Filename  string
+	Line, Col int
+	Offset    int
+}
+
+func unknownPos(filename string) *SourcePos {
+	return &SourcePos{Filename: filename}
+}
+
+type node interface {
+	start() *SourcePos
+	end() *SourcePos
+	leadingComments() []*comment
+	trailingComments() []*comment
+}
+
+type terminalNode interface {
+	node
+	popLeadingComment() *comment
+	pushTrailingComment(*comment)
+}
+
+var _ terminalNode = (*basicNode)(nil)
+var _ terminalNode = (*stringLiteralNode)(nil)
+var _ terminalNode = (*intLiteralNode)(nil)
+var _ terminalNode = (*floatLiteralNode)(nil)
+var _ terminalNode = (*identNode)(nil)
+
+type fileDecl interface {
+	node
+	getSyntax() node
+}
+
+var _ fileDecl = (*fileNode)(nil)
+var _ fileDecl = (*noSourceNode)(nil)
+
+type optionDecl interface {
+	node
+	getName() node
+	getValue() valueNode
+}
+
+var _ optionDecl = (*optionNode)(nil)
+var _ optionDecl = (*noSourceNode)(nil)
+
+type fieldDecl interface {
+	node
+	fieldLabel() node
+	fieldName() node
+	fieldType() node
+	fieldTag() node
+	fieldExtendee() node
+	getGroupKeyword() node
+}
+
+var _ fieldDecl = (*fieldNode)(nil)
+var _ fieldDecl = (*groupNode)(nil)
+var _ fieldDecl = (*mapFieldNode)(nil)
+var _ fieldDecl = (*syntheticMapField)(nil)
+var _ fieldDecl = (*noSourceNode)(nil)
+
+type rangeDecl interface {
+	node
+	rangeStart() node
+	rangeEnd() node
+}
+
+var _ rangeDecl = (*rangeNode)(nil)
+var _ rangeDecl = (*noSourceNode)(nil)
+
+type enumValueDecl interface {
+	node
+	getName() node
+	getNumber() node
+}
+
+var _ enumValueDecl = (*enumValueNode)(nil)
+var _ enumValueDecl = (*noSourceNode)(nil)
+
+type msgDecl interface {
+	node
+	messageName() node
+	reservedNames() []*stringLiteralNode
+}
+
+var _ msgDecl = (*messageNode)(nil)
+var _ msgDecl = (*groupNode)(nil)
+var _ msgDecl = (*mapFieldNode)(nil)
+var _ msgDecl = (*noSourceNode)(nil)
+
+type methodDecl interface {
+	node
+	getInputType() node
+	getOutputType() node
+}
+
+var _ methodDecl = (*methodNode)(nil)
+var _ methodDecl = (*noSourceNode)(nil)
+
+type posRange struct {
+	start, end *SourcePos
+}
+
+type basicNode struct {
+	posRange
+	leading  []*comment
+	trailing []*comment
+}
+
+func (n *basicNode) start() *SourcePos {
+	return n.posRange.start
+}
+
+func (n *basicNode) end() *SourcePos {
+	return n.posRange.end
+}
+
+func (n *basicNode) leadingComments() []*comment {
+	return n.leading
+}
+
+func (n *basicNode) trailingComments() []*comment {
+	return n.trailing
+}
+
+func (n *basicNode) popLeadingComment() *comment {
+	c := n.leading[0]
+	n.leading = n.leading[1:]
+	return c
+}
+
+func (n *basicNode) pushTrailingComment(c *comment) {
+	n.trailing = append(n.trailing, c)
+}
+
+type comment struct {
+	posRange
+	text string
+}
+
+type basicCompositeNode struct {
+	first node
+	last  node
+}
+
+func (n *basicCompositeNode) start() *SourcePos {
+	return n.first.start()
+}
+
+func (n *basicCompositeNode) end() *SourcePos {
+	return n.last.end()
+}
+
+func (n *basicCompositeNode) leadingComments() []*comment {
+	return n.first.leadingComments()
+}
+
+func (n *basicCompositeNode) trailingComments() []*comment {
+	return n.last.trailingComments()
+}
+
+func (n *basicCompositeNode) setRange(first, last node) {
+	n.first = first
+	n.last = last
+}
+
+type fileNode struct {
+	basicCompositeNode
+	syntax *syntaxNode
+	decls  []*fileElement
+
+	// These fields are populated after parsing, to make it easier to find them
+	// without searching decls. The parse result has a map of descriptors to
+	// nodes which makes the other declarations easily discoverable. But these
+	// elements do not map to descriptors -- they are just stored as strings in
+	// the file descriptor.
+	imports []*importNode
+	pkg     *packageNode
+}
+
+func (n *fileNode) getSyntax() node {
+	return n.syntax
+}
+
+type fileElement struct {
+	// a discriminated union: only one field will be set
+	imp     *importNode
+	pkg     *packageNode
+	option  *optionNode
+	message *messageNode
+	enum    *enumNode
+	extend  *extendNode
+	service *serviceNode
+	empty   *basicNode
+}
+
+func (n *fileElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *fileElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *fileElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *fileElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *fileElement) get() node {
+	switch {
+	case n.imp != nil:
+		return n.imp
+	case n.pkg != nil:
+		return n.pkg
+	case n.option != nil:
+		return n.option
+	case n.message != nil:
+		return n.message
+	case n.enum != nil:
+		return n.enum
+	case n.extend != nil:
+		return n.extend
+	case n.service != nil:
+		return n.service
+	default:
+		return n.empty
+	}
+}
+
+type syntaxNode struct {
+	basicCompositeNode
+	syntax *stringLiteralNode
+}
+
+type importNode struct {
+	basicCompositeNode
+	name   *stringLiteralNode
+	public bool
+	weak   bool
+}
+
+type packageNode struct {
+	basicCompositeNode
+	name *identNode
+}
+
+type identifier string
+
+type identKind int
+
+const (
+	identSimpleName identKind = iota
+	identQualified
+	identTypeName
+)
+
+type identNode struct {
+	basicNode
+	val  string
+	kind identKind
+}
+
+func (n *identNode) value() interface{} {
+	return identifier(n.val)
+}
+
+type optionNode struct {
+	basicCompositeNode
+	name *optionNameNode
+	val  valueNode
+}
+
+func (n *optionNode) getName() node {
+	return n.name
+}
+
+func (n *optionNode) getValue() valueNode {
+	return n.val
+}
+
+type optionNameNode struct {
+	basicCompositeNode
+	parts []*optionNamePartNode
+}
+
+type optionNamePartNode struct {
+	basicCompositeNode
+	text        *identNode
+	offset      int
+	length      int
+	isExtension bool
+	st, en      *SourcePos
+}
+
+func (n *optionNamePartNode) start() *SourcePos {
+	if n.isExtension {
+		return n.basicCompositeNode.start()
+	}
+	return n.st
+}
+
+func (n *optionNamePartNode) end() *SourcePos {
+	if n.isExtension {
+		return n.basicCompositeNode.end()
+	}
+	return n.en
+}
+
+func (n *optionNamePartNode) setRange(first, last node) {
+	n.basicCompositeNode.setRange(first, last)
+	if !n.isExtension {
+		st := *first.start()
+		st.Col += n.offset
+		n.st = &st
+		en := st
+		en.Col += n.length
+		n.en = &en
+	}
+}
+
+type valueNode interface {
+	node
+	value() interface{}
+}
+
+var _ valueNode = (*stringLiteralNode)(nil)
+var _ valueNode = (*intLiteralNode)(nil)
+var _ valueNode = (*negativeIntLiteralNode)(nil)
+var _ valueNode = (*floatLiteralNode)(nil)
+var _ valueNode = (*boolLiteralNode)(nil)
+var _ valueNode = (*sliceLiteralNode)(nil)
+var _ valueNode = (*aggregateLiteralNode)(nil)
+var _ valueNode = (*noSourceNode)(nil)
+
+type stringLiteralNode struct {
+	basicCompositeNode
+	val string
+}
+
+func (n *stringLiteralNode) value() interface{} {
+	return n.val
+}
+
+func (n *stringLiteralNode) popLeadingComment() *comment {
+	return n.first.(terminalNode).popLeadingComment()
+}
+
+func (n *stringLiteralNode) pushTrailingComment(c *comment) {
+	n.last.(terminalNode).pushTrailingComment(c)
+}
+
+type intLiteralNode struct {
+	basicNode
+	val uint64
+}
+
+func (n *intLiteralNode) value() interface{} {
+	return n.val
+}
+
+type negativeIntLiteralNode struct {
+	basicCompositeNode
+	val int64
+}
+
+func (n *negativeIntLiteralNode) value() interface{} {
+	return n.val
+}
+
+type floatLiteralNode struct {
+	basicCompositeNode
+	val float64
+}
+
+func (n *floatLiteralNode) value() interface{} {
+	return n.val
+}
+
+func (n *floatLiteralNode) popLeadingComment() *comment {
+	return n.first.(terminalNode).popLeadingComment()
+}
+
+func (n *floatLiteralNode) pushTrailingComment(c *comment) {
+	n.last.(terminalNode).pushTrailingComment(c)
+}
+
+type boolLiteralNode struct {
+	basicNode
+	val bool
+}
+
+func (n *boolLiteralNode) value() interface{} {
+	return n.val
+}
+
+type sliceLiteralNode struct {
+	basicCompositeNode
+	elements []valueNode
+}
+
+func (n *sliceLiteralNode) value() interface{} {
+	return n.elements
+}
+
+type aggregateLiteralNode struct {
+	basicCompositeNode
+	elements []*aggregateEntryNode
+}
+
+func (n *aggregateLiteralNode) value() interface{} {
+	return n.elements
+}
+
+type aggregateEntryNode struct {
+	basicCompositeNode
+	name *aggregateNameNode
+	val  valueNode
+}
+
+type aggregateNameNode struct {
+	basicCompositeNode
+	name        *identNode
+	isExtension bool
+}
+
+func (a *aggregateNameNode) value() string {
+	if a.isExtension {
+		return "[" + a.name.val + "]"
+	} else {
+		return a.name.val
+	}
+}
+
+type fieldNode struct {
+	basicCompositeNode
+	label   *labelNode
+	fldType *identNode
+	name    *identNode
+	tag     *intLiteralNode
+	options []*optionNode
+
+	// This field is populated after parsing, to allow lookup of extendee source
+	// locations when field extendees cannot be linked. (Otherwise, this is just
+	// stored as a string in the field descriptors defined inside the extend
+	// block).
+	extendee *extendNode
+}
+
+func (n *fieldNode) fieldLabel() node {
+	// proto3 fields and fields inside one-ofs will not have a label and we need
+	// this check in order to return a nil node -- otherwise we'd return a
+	// non-nil node that has a nil pointer value in it :/
+	if n.label == nil {
+		return nil
+	}
+	return n.label
+}
+
+func (n *fieldNode) fieldName() node {
+	return n.name
+}
+
+func (n *fieldNode) fieldType() node {
+	return n.fldType
+}
+
+func (n *fieldNode) fieldTag() node {
+	return n.tag
+}
+
+func (n *fieldNode) fieldExtendee() node {
+	if n.extendee != nil {
+		return n.extendee.extendee
+	}
+	return nil
+}
+
+func (n *fieldNode) getGroupKeyword() node {
+	return nil
+}
+
+type labelNode struct {
+	basicNode
+	repeated bool
+	required bool
+}
+
+type groupNode struct {
+	basicCompositeNode
+	groupKeyword *identNode
+	label        *labelNode
+	name         *identNode
+	tag          *intLiteralNode
+	decls        []*messageElement
+
+	// This field is populated after parsing, to make it easier to find them
+	// without searching decls. The parse result has a map of descriptors to
+	// nodes which makes the other declarations easily discoverable. But these
+	// elements do not map to descriptors -- they are just stored as strings in
+	// the message descriptor.
+	reserved []*stringLiteralNode
+	// This field is populated after parsing, to allow lookup of extendee source
+	// locations when field extendees cannot be linked. (Otherwise, this is just
+	// stored as a string in the field descriptors defined inside the extend
+	// block).
+	extendee *extendNode
+}
+
+func (n *groupNode) fieldLabel() node {
+	return n.label
+}
+
+func (n *groupNode) fieldName() node {
+	return n.name
+}
+
+func (n *groupNode) fieldType() node {
+	return n.name
+}
+
+func (n *groupNode) fieldTag() node {
+	return n.tag
+}
+
+func (n *groupNode) fieldExtendee() node {
+	if n.extendee != nil {
+		return n.extendee.extendee
+	}
+	return nil
+}
+
+func (n *groupNode) getGroupKeyword() node {
+	return n.groupKeyword
+}
+
+func (n *groupNode) messageName() node {
+	return n.name
+}
+
+func (n *groupNode) reservedNames() []*stringLiteralNode {
+	return n.reserved
+}
+
+type oneOfNode struct {
+	basicCompositeNode
+	name  *identNode
+	decls []*oneOfElement
+}
+
+type oneOfElement struct {
+	// a discriminated union: only one field will be set
+	option *optionNode
+	field  *fieldNode
+	empty  *basicNode
+}
+
+func (n *oneOfElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *oneOfElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *oneOfElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *oneOfElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *oneOfElement) get() node {
+	switch {
+	case n.option != nil:
+		return n.option
+	case n.field != nil:
+		return n.field
+	default:
+		return n.empty
+	}
+}
+
+type mapFieldNode struct {
+	basicCompositeNode
+	mapKeyword *identNode
+	keyType    *identNode
+	valueType  *identNode
+	name       *identNode
+	tag        *intLiteralNode
+	options    []*optionNode
+}
+
+func (n *mapFieldNode) fieldLabel() node {
+	return n.mapKeyword
+}
+
+func (n *mapFieldNode) fieldName() node {
+	return n.name
+}
+
+func (n *mapFieldNode) fieldType() node {
+	return n.mapKeyword
+}
+
+func (n *mapFieldNode) fieldTag() node {
+	return n.tag
+}
+
+func (n *mapFieldNode) fieldExtendee() node {
+	return nil
+}
+
+func (n *mapFieldNode) getGroupKeyword() node {
+	return nil
+}
+
+func (n *mapFieldNode) messageName() node {
+	return n.name
+}
+
+func (n *mapFieldNode) reservedNames() []*stringLiteralNode {
+	return nil
+}
+
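+// keyField and valueField synthesize field nodes for the implicit map entry
+// message: protobuf map entries always use tag 1 for the key field and tag 2
+// for the value field, which is why the synthetic tags below are hard-coded.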
+func (n *mapFieldNode) keyField() *syntheticMapField {
+	tag := &intLiteralNode{
+		basicNode: basicNode{
+			posRange: posRange{start: n.keyType.start(), end: n.keyType.end()},
+		},
+		val: 1,
+	}
+	return &syntheticMapField{ident: n.keyType, tag: tag}
+}
+
+func (n *mapFieldNode) valueField() *syntheticMapField {
+	tag := &intLiteralNode{
+		basicNode: basicNode{
+			posRange: posRange{start: n.valueType.start(), end: n.valueType.end()},
+		},
+		val: 2,
+	}
+	return &syntheticMapField{ident: n.valueType, tag: tag}
+}
+
+type syntheticMapField struct {
+	ident *identNode
+	tag   *intLiteralNode
+}
+
+func (n *syntheticMapField) start() *SourcePos {
+	return n.ident.start()
+}
+
+func (n *syntheticMapField) end() *SourcePos {
+	return n.ident.end()
+}
+
+func (n *syntheticMapField) leadingComments() []*comment {
+	return nil
+}
+
+func (n *syntheticMapField) trailingComments() []*comment {
+	return nil
+}
+
+func (n *syntheticMapField) fieldLabel() node {
+	return n.ident
+}
+
+func (n *syntheticMapField) fieldName() node {
+	return n.ident
+}
+
+func (n *syntheticMapField) fieldType() node {
+	return n.ident
+}
+
+func (n *syntheticMapField) fieldTag() node {
+	return n.tag
+}
+
+func (n *syntheticMapField) fieldExtendee() node {
+	return nil
+}
+
+func (n *syntheticMapField) getGroupKeyword() node {
+	return nil
+}
+
+type extensionRangeNode struct {
+	basicCompositeNode
+	ranges  []*rangeNode
+	options []*optionNode
+}
+
+type rangeNode struct {
+	basicCompositeNode
+	stNode, enNode node
+	st, en         int32
+}
+
+func (n *rangeNode) rangeStart() node {
+	return n.stNode
+}
+
+func (n *rangeNode) rangeEnd() node {
+	return n.enNode
+}
+
+type reservedNode struct {
+	basicCompositeNode
+	ranges []*rangeNode
+	names  []*stringLiteralNode
+}
+
+type enumNode struct {
+	basicCompositeNode
+	name  *identNode
+	decls []*enumElement
+
+	// This field is populated after parsing, to make it easier to find them
+	// without searching decls. The parse result has a map of descriptors to
+	// nodes which makes the other declarations easily discoverable. But these
+	// elements do not map to descriptors -- they are just stored as strings in
+	// the enum descriptor.
+	reserved []*stringLiteralNode
+}
+
+type enumElement struct {
+	// a discriminated union: only one field will be set
+	option   *optionNode
+	value    *enumValueNode
+	reserved *reservedNode
+	empty    *basicNode
+}
+
+func (n *enumElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *enumElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *enumElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *enumElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *enumElement) get() node {
+	switch {
+	case n.option != nil:
+		return n.option
+	case n.value != nil:
+		return n.value
+	default:
+		return n.empty
+	}
+}
+
+type enumValueNode struct {
+	basicCompositeNode
+	name    *identNode
+	options []*optionNode
+
+	// only one of these two will be set:
+
+	numberP *intLiteralNode         // positive numeric value
+	numberN *negativeIntLiteralNode // negative numeric value
+}
+
+func (n *enumValueNode) getName() node {
+	return n.name
+}
+
+func (n *enumValueNode) getNumber() node {
+	if n.numberP != nil {
+		return n.numberP
+	}
+	return n.numberN
+}
+
+type messageNode struct {
+	basicCompositeNode
+	name  *identNode
+	decls []*messageElement
+
+	// This field is populated after parsing, to make it easier to find them
+	// without searching decls. The parse result has a map of descriptors to
+	// nodes which makes the other declarations easily discoverable. But these
+	// elements do not map to descriptors -- they are just stored as strings in
+	// the message descriptor.
+	reserved []*stringLiteralNode
+}
+
+func (n *messageNode) messageName() node {
+	return n.name
+}
+
+func (n *messageNode) reservedNames() []*stringLiteralNode {
+	return n.reserved
+}
+
+type messageElement struct {
+	// a discriminated union: only one field will be set
+	option         *optionNode
+	field          *fieldNode
+	mapField       *mapFieldNode
+	oneOf          *oneOfNode
+	group          *groupNode
+	nested         *messageNode
+	enum           *enumNode
+	extend         *extendNode
+	extensionRange *extensionRangeNode
+	reserved       *reservedNode
+	empty          *basicNode
+}
+
+func (n *messageElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *messageElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *messageElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *messageElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *messageElement) get() node {
+	switch {
+	case n.option != nil:
+		return n.option
+	case n.field != nil:
+		return n.field
+	case n.mapField != nil:
+		return n.mapField
+	case n.oneOf != nil:
+		return n.oneOf
+	case n.group != nil:
+		return n.group
+	case n.nested != nil:
+		return n.nested
+	case n.enum != nil:
+		return n.enum
+	case n.extend != nil:
+		return n.extend
+	case n.extensionRange != nil:
+		return n.extensionRange
+	case n.reserved != nil:
+		return n.reserved
+	default:
+		return n.empty
+	}
+}
+
+type extendNode struct {
+	basicCompositeNode
+	extendee *identNode
+	decls    []*extendElement
+}
+
+type extendElement struct {
+	// a discriminated union: only one field will be set
+	field *fieldNode
+	group *groupNode
+	empty *basicNode
+}
+
+func (n *extendElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *extendElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *extendElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *extendElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *extendElement) get() node {
+	switch {
+	case n.field != nil:
+		return n.field
+	case n.group != nil:
+		return n.group
+	default:
+		return n.empty
+	}
+}
+
+type serviceNode struct {
+	basicCompositeNode
+	name  *identNode
+	decls []*serviceElement
+}
+
+type serviceElement struct {
+	// a discriminated union: only one field will be set
+	option *optionNode
+	rpc    *methodNode
+	empty  *basicNode
+}
+
+func (n *serviceElement) start() *SourcePos {
+	return n.get().start()
+}
+
+func (n *serviceElement) end() *SourcePos {
+	return n.get().end()
+}
+
+func (n *serviceElement) leadingComments() []*comment {
+	return n.get().leadingComments()
+}
+
+func (n *serviceElement) trailingComments() []*comment {
+	return n.get().trailingComments()
+}
+
+func (n *serviceElement) get() node {
+	switch {
+	case n.option != nil:
+		return n.option
+	case n.rpc != nil:
+		return n.rpc
+	default:
+		return n.empty
+	}
+}
+
+type methodNode struct {
+	basicCompositeNode
+	name    *identNode
+	input   *rpcTypeNode
+	output  *rpcTypeNode
+	options []*optionNode
+}
+
+func (n *methodNode) getInputType() node {
+	return n.input.msgType
+}
+
+func (n *methodNode) getOutputType() node {
+	return n.output.msgType
+}
+
+type rpcTypeNode struct {
+	basicCompositeNode
+	msgType       *identNode
+	streamKeyword node
+}
+
+type noSourceNode struct {
+	pos *SourcePos
+}
+
+func (n noSourceNode) start() *SourcePos {
+	return n.pos
+}
+
+func (n noSourceNode) end() *SourcePos {
+	return n.pos
+}
+
+func (n noSourceNode) leadingComments() []*comment {
+	return nil
+}
+
+func (n noSourceNode) trailingComments() []*comment {
+	return nil
+}
+
+func (n noSourceNode) getSyntax() node {
+	return n
+}
+
+func (n noSourceNode) getName() node {
+	return n
+}
+
+func (n noSourceNode) getValue() valueNode {
+	return n
+}
+
+func (n noSourceNode) fieldLabel() node {
+	return n
+}
+
+func (n noSourceNode) fieldName() node {
+	return n
+}
+
+func (n noSourceNode) fieldType() node {
+	return n
+}
+
+func (n noSourceNode) fieldTag() node {
+	return n
+}
+
+func (n noSourceNode) fieldExtendee() node {
+	return n
+}
+
+func (n noSourceNode) getGroupKeyword() node {
+	return n
+}
+
+func (n noSourceNode) rangeStart() node {
+	return n
+}
+
+func (n noSourceNode) rangeEnd() node {
+	return n
+}
+
+func (n noSourceNode) getNumber() node {
+	return n
+}
+
+func (n noSourceNode) messageName() node {
+	return n
+}
+
+func (n noSourceNode) reservedNames() []*stringLiteralNode {
+	return nil
+}
+
+func (n noSourceNode) getInputType() node {
+	return n
+}
+
+func (n noSourceNode) getOutputType() node {
+	return n
+}
+
+func (n noSourceNode) value() interface{} {
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
new file mode 100644
index 0000000..c6446d3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/doc.go
@@ -0,0 +1,10 @@
+// Package protoparse provides functionality for parsing *.proto source files
+// into descriptors that can be used with other protoreflect packages, like
+// dynamic messages and dynamic GRPC clients.
+//
+// This package links in other packages that include compiled descriptors for
+// the various "google/protobuf/*.proto" files that are included with protoc.
+// That way, as when invoking protoc, programs need not supply copies of these
+// "builtin" files. If copies of the files are provided, however, they will be
+// used instead of the builtin descriptors.
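+//
+// A minimal usage sketch, assuming the Parser type provided by this package
+// ("foo.proto" is a placeholder file name):
+//
+//	p := protoparse.Parser{ImportPaths: []string{"."}}
+//	fds, err := p.ParseFiles("foo.proto")
+//	if err != nil {
+//		// handle parse or link errors
+//	}
+//	_ = fds // one *desc.FileDescriptor per named file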
+package protoparse
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
new file mode 100644
index 0000000..c685e56
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/lexer.go
@@ -0,0 +1,766 @@
+package protoparse
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type runeReader struct {
+	rr     *bufio.Reader
+	unread []rune
+	err    error
+}
+
+func (rr *runeReader) readRune() (r rune, size int, err error) {
+	if rr.err != nil {
+		return 0, 0, rr.err
+	}
+	if len(rr.unread) > 0 {
+		r := rr.unread[len(rr.unread)-1]
+		rr.unread = rr.unread[:len(rr.unread)-1]
+		return r, utf8.RuneLen(r), nil
+	}
+	r, sz, err := rr.rr.ReadRune()
+	if err != nil {
+		rr.err = err
+	}
+	return r, sz, err
+}
+
+func (rr *runeReader) unreadRune(r rune) {
+	rr.unread = append(rr.unread, r)
+}
+
+func lexError(l protoLexer, pos *SourcePos, err string) {
+	pl := l.(*protoLex)
+	if pl.err == nil {
+		pl.err = ErrorWithSourcePos{Underlying: errors.New(err), Pos: pos}
+	}
+}
+
+type protoLex struct {
+	filename string
+	input    *runeReader
+	err      error
+	res      *fileNode
+
+	lineNo int
+	colNo  int
+	offset int
+
+	prevSym terminalNode
+}
+
+func newLexer(in io.Reader) *protoLex {
+	return &protoLex{input: &runeReader{rr: bufio.NewReader(in)}}
+}
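+
+// A rough sketch of how this lexer is driven by the goyacc-generated parser
+// (the protoParse function name comes from that generated code and is assumed
+// here; error handling is simplified):
+//
+//	lx := newLexer(strings.NewReader(src))
+//	lx.filename = "foo.proto"
+//	protoParse(lx)
+//	// on success lx.res holds the parsed fileNode; lx.err holds any error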
+
+var keywords = map[string]int{
+	"syntax":     _SYNTAX,
+	"import":     _IMPORT,
+	"weak":       _WEAK,
+	"public":     _PUBLIC,
+	"package":    _PACKAGE,
+	"option":     _OPTION,
+	"true":       _TRUE,
+	"false":      _FALSE,
+	"inf":        _INF,
+	"nan":        _NAN,
+	"repeated":   _REPEATED,
+	"optional":   _OPTIONAL,
+	"required":   _REQUIRED,
+	"double":     _DOUBLE,
+	"float":      _FLOAT,
+	"int32":      _INT32,
+	"int64":      _INT64,
+	"uint32":     _UINT32,
+	"uint64":     _UINT64,
+	"sint32":     _SINT32,
+	"sint64":     _SINT64,
+	"fixed32":    _FIXED32,
+	"fixed64":    _FIXED64,
+	"sfixed32":   _SFIXED32,
+	"sfixed64":   _SFIXED64,
+	"bool":       _BOOL,
+	"string":     _STRING,
+	"bytes":      _BYTES,
+	"group":      _GROUP,
+	"oneof":      _ONEOF,
+	"map":        _MAP,
+	"extensions": _EXTENSIONS,
+	"to":         _TO,
+	"max":        _MAX,
+	"reserved":   _RESERVED,
+	"enum":       _ENUM,
+	"message":    _MESSAGE,
+	"extend":     _EXTEND,
+	"service":    _SERVICE,
+	"rpc":        _RPC,
+	"stream":     _STREAM,
+	"returns":    _RETURNS,
+}
+
+func (l *protoLex) cur() *SourcePos {
+	return &SourcePos{
+		Filename: l.filename,
+		Offset:   l.offset,
+		Line:     l.lineNo + 1,
+		Col:      l.colNo + 1,
+	}
+}
+
+func (l *protoLex) prev() *SourcePos {
+	if l.prevSym == nil {
+		return &SourcePos{
+			Filename: l.filename,
+			Offset:   0,
+			Line:     1,
+			Col:      1,
+		}
+	}
+	return l.prevSym.start()
+}
+
+func (l *protoLex) Lex(lval *protoSymType) int {
+	if l.err != nil {
+		// if we are already in a failed state, bail
+		lval.err = l.err
+		return _ERROR
+	}
+
+	prevLineNo := l.lineNo
+	prevColNo := l.colNo
+	prevOffset := l.offset
+	var comments []*comment
+
+	pos := func() posRange {
+		return posRange{
+			start: &SourcePos{
+				Filename: l.filename,
+				Offset:   prevOffset,
+				Line:     prevLineNo + 1,
+				Col:      prevColNo + 1,
+			},
+			end: l.cur(),
+		}
+	}
+	basic := func() basicNode {
+		return basicNode{
+			posRange: pos(),
+			leading:  comments,
+		}
+	}
+	setPrev := func(n terminalNode) {
+		nStart := n.start().Line
+		if _, ok := n.(*basicNode); ok {
+			// if the node is a simple rune, don't attribute comments to it
+			// HACK: adjusting the start line makes leading comments appear
+			// detached so the logic below will naturally associate the trailing
+			// comment with the previous symbol
+			nStart += 2
+		}
+		if l.prevSym != nil && len(n.leadingComments()) > 0 && l.prevSym.end().Line < nStart {
+			// we may need to re-attribute the first comment to
+			// instead be previous node's trailing comment
+			prevEnd := l.prevSym.end().Line
+			comments := n.leadingComments()
+			c := comments[0]
+			commentStart := c.start.Line
+			if commentStart == prevEnd {
+				// comment is on same line as previous symbol
+				n.popLeadingComment()
+				l.prevSym.pushTrailingComment(c)
+			} else if commentStart == prevEnd+1 {
+				// comment is right after previous symbol; see if it is detached
+				// and if so re-attribute
+				singleLineStyle := strings.HasPrefix(c.text, "//")
+				line := c.end.Line
+				groupEnd := -1
+				for i := 1; i < len(comments); i++ {
+					c := comments[i]
+					newGroup := false
+					if !singleLineStyle || c.start.Line > line+1 {
+						// we've found a gap between comments, which means the
+						// previous comments were detached
+						newGroup = true
+					} else {
+						line = c.end.Line
+						singleLineStyle = strings.HasPrefix(comments[i].text, "//")
+						if !singleLineStyle {
+							// we've found a switch from // comments to /*
+							// consider that a new group which means the
+							// previous comments were detached
+							newGroup = true
+						}
+					}
+					if newGroup {
+						groupEnd = i
+						break
+					}
+				}
+
+				if groupEnd == -1 {
+					// just one group of comments; we'll mark it as a trailing
+					// comment if it immediately follows previous symbol and is
+					// detached from current symbol
+					c1 := comments[0]
+					c2 := comments[len(comments)-1]
+					if c1.start.Line <= prevEnd+1 && c2.end.Line < nStart-1 {
+						groupEnd = len(comments)
+					}
+				}
+
+				for i := 0; i < groupEnd; i++ {
+					l.prevSym.pushTrailingComment(n.popLeadingComment())
+				}
+			}
+		}
+
+		l.prevSym = n
+	}
+	setString := func(val string) {
+		b := basic()
+		lval.str = &stringLiteralNode{val: val}
+		lval.str.setRange(&b, &b)
+		setPrev(lval.str)
+	}
+	setIdent := func(val string, kind identKind) {
+		lval.id = &identNode{basicNode: basic(), val: val, kind: kind}
+		setPrev(lval.id)
+	}
+	setInt := func(val uint64) {
+		lval.ui = &intLiteralNode{basicNode: basic(), val: val}
+		setPrev(lval.ui)
+	}
+	setFloat := func(val float64) {
+		b := basic()
+		lval.f = &floatLiteralNode{val: val}
+		lval.f.setRange(&b, &b)
+		setPrev(lval.f)
+	}
+	setRune := func() {
+		b := basic()
+		lval.b = &b
+		setPrev(lval.b)
+	}
+	setError := func(err error) {
+		lval.err = err
+		l.err = err
+	}
+
+	for {
+		c, n, err := l.input.readRune()
+		if err == io.EOF {
+			// we're not actually returning a rune, but this will associate
+			// accumulated comments as a trailing comment on the last symbol
+			// (if appropriate)
+			setRune()
+			return 0
+		} else if err != nil {
+			setError(err)
+			return _ERROR
+		}
+
+		prevLineNo = l.lineNo
+		prevColNo = l.colNo
+		prevOffset = l.offset
+
+		l.offset += n
+		if c == '\n' {
+			l.colNo = 0
+			l.lineNo++
+			continue
+		} else if c == '\r' {
+			continue
+		}
+		l.colNo++
+		if c == ' ' || c == '\t' {
+			continue
+		}
+
+		if c == '.' {
+			// tokens that start with a dot include type names and decimal literals
+			cn, _, err := l.input.readRune()
+			if err != nil {
+				setRune()
+				return int(c)
+			}
+			if cn == '_' || (cn >= 'a' && cn <= 'z') || (cn >= 'A' && cn <= 'Z') {
+				l.colNo++
+				token := []rune{c, cn}
+				token = l.readIdentifier(token)
+				setIdent(string(token), identTypeName)
+				return _TYPENAME
+			}
+			if cn >= '0' && cn <= '9' {
+				l.colNo++
+				token := []rune{c, cn}
+				token = l.readNumber(token, false, true)
+				f, err := strconv.ParseFloat(string(token), 64)
+				if err != nil {
+					setError(err)
+					return _ERROR
+				}
+				setFloat(f)
+				return _FLOAT_LIT
+			}
+			l.input.unreadRune(cn)
+			setRune()
+			return int(c)
+		}
+
+		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
+			// identifier
+			token := []rune{c}
+			token = l.readIdentifier(token)
+			str := string(token)
+			if strings.Contains(str, ".") {
+				setIdent(str, identQualified)
+				return _FQNAME
+			}
+			if t, ok := keywords[str]; ok {
+				setIdent(str, identSimpleName)
+				return t
+			}
+			setIdent(str, identSimpleName)
+			return _NAME
+		}
+
+		if c >= '0' && c <= '9' {
+			// integer or float literal
+			if c == '0' {
+				cn, _, err := l.input.readRune()
+				if err != nil {
+					setInt(0)
+					return _INT_LIT
+				}
+				if cn == 'x' || cn == 'X' {
+					cnn, _, err := l.input.readRune()
+					if err != nil {
+						l.input.unreadRune(cn)
+						setInt(0)
+						return _INT_LIT
+					}
+					if (cnn >= '0' && cnn <= '9') || (cnn >= 'a' && cnn <= 'f') || (cnn >= 'A' && cnn <= 'F') {
+						// hexadecimal!
+						l.colNo += 2
+						token := []rune{cnn}
+						token = l.readHexNumber(token)
+						ui, err := strconv.ParseUint(string(token), 16, 64)
+						if err != nil {
+							setError(err)
+							return _ERROR
+						}
+						setInt(ui)
+						return _INT_LIT
+					}
+					l.input.unreadRune(cnn)
+					l.input.unreadRune(cn)
+					setInt(0)
+					return _INT_LIT
+				} else {
+					l.input.unreadRune(cn)
+				}
+			}
+			token := []rune{c}
+			token = l.readNumber(token, true, true)
+			numstr := string(token)
+			if strings.Contains(numstr, ".") || strings.Contains(numstr, "e") || strings.Contains(numstr, "E") {
+				// floating point!
+				f, err := strconv.ParseFloat(numstr, 64)
+				if err != nil {
+					setError(err)
+					return _ERROR
+				}
+				setFloat(f)
+				return _FLOAT_LIT
+			}
+			// integer! (decimal or octal)
+			ui, err := strconv.ParseUint(numstr, 0, 64)
+			if err != nil {
+				setError(err)
+				return _ERROR
+			}
+			setInt(ui)
+			return _INT_LIT
+		}
+
+		if c == '\'' || c == '"' {
+			// string literal
+			str, err := l.readStringLiteral(c)
+			if err != nil {
+				setError(err)
+				return _ERROR
+			}
+			setString(str)
+			return _STRING_LIT
+		}
+
+		if c == '/' {
+			// comment
+			cn, _, err := l.input.readRune()
+			if err != nil {
+				setRune()
+				return int(c)
+			}
+			if cn == '/' {
+				l.colNo++
+				hitNewline, txt := l.skipToEndOfLineComment()
+				commentPos := pos()
+				commentPos.end.Col++
+				if hitNewline {
+					l.colNo = 0
+					l.lineNo++
+				}
+				comments = append(comments, &comment{posRange: commentPos, text: txt})
+				continue
+			}
+			if cn == '*' {
+				l.colNo++
+				if txt, ok := l.skipToEndOfBlockComment(); !ok {
+					setError(errors.New("block comment never terminates, unexpected EOF"))
+					return _ERROR
+				} else {
+					comments = append(comments, &comment{posRange: pos(), text: txt})
+				}
+				continue
+			}
+			l.input.unreadRune(cn)
+		}
+
+		setRune()
+		return int(c)
+	}
+}
+
+func (l *protoLex) readNumber(sofar []rune, allowDot bool, allowExp bool) []rune {
+	token := sofar
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			break
+		}
+		if c == '.' {
+			if !allowDot {
+				l.input.unreadRune(c)
+				break
+			}
+			allowDot = false
+			cn, _, err := l.input.readRune()
+			if err != nil {
+				l.input.unreadRune(c)
+				break
+			}
+			if cn < '0' || cn > '9' {
+				l.input.unreadRune(cn)
+				l.input.unreadRune(c)
+				break
+			}
+			l.colNo++
+			token = append(token, c)
+			c = cn
+		} else if c == 'e' || c == 'E' {
+			if !allowExp {
+				l.input.unreadRune(c)
+				break
+			}
+			allowExp = false
+			cn, _, err := l.input.readRune()
+			if err != nil {
+				l.input.unreadRune(c)
+				break
+			}
+			if cn == '-' || cn == '+' {
+				cnn, _, err := l.input.readRune()
+				if err != nil {
+					l.input.unreadRune(cn)
+					l.input.unreadRune(c)
+					break
+				}
+				if cnn < '0' || cnn > '9' {
+					l.input.unreadRune(cnn)
+					l.input.unreadRune(cn)
+					l.input.unreadRune(c)
+					break
+				}
+				l.colNo++
+				token = append(token, c)
+				c = cn
+				cn = cnn
+			} else if cn < '0' || cn > '9' {
+				l.input.unreadRune(cn)
+				l.input.unreadRune(c)
+				break
+			}
+			l.colNo++
+			token = append(token, c)
+			c = cn
+		} else if c < '0' || c > '9' {
+			l.input.unreadRune(c)
+			break
+		}
+		l.colNo++
+		token = append(token, c)
+	}
+	return token
+}
+
+func (l *protoLex) readHexNumber(sofar []rune) []rune {
+	token := sofar
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			break
+		}
+		if (c < 'a' || c > 'f') && (c < 'A' || c > 'F') && (c < '0' || c > '9') {
+			l.input.unreadRune(c)
+			break
+		}
+		l.colNo++
+		token = append(token, c)
+	}
+	return token
+}
+
+func (l *protoLex) readIdentifier(sofar []rune) []rune {
+	token := sofar
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			break
+		}
+		if c == '.' {
+			cn, _, err := l.input.readRune()
+			if err != nil {
+				l.input.unreadRune(c)
+				break
+			}
+			if cn != '_' && (cn < 'a' || cn > 'z') && (cn < 'A' || cn > 'Z') {
+				l.input.unreadRune(cn)
+				l.input.unreadRune(c)
+				break
+			}
+			l.colNo++
+			token = append(token, c)
+			c = cn
+		} else if c != '_' && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') {
+			l.input.unreadRune(c)
+			break
+		}
+		l.colNo++
+		token = append(token, c)
+	}
+	return token
+}
+
+func (l *protoLex) readStringLiteral(quote rune) (string, error) {
+	var buf bytes.Buffer
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			if err == io.EOF {
+				err = io.ErrUnexpectedEOF
+			}
+			return "", err
+		}
+		if c == '\n' {
+			l.colNo = 0
+			l.lineNo++
+			return "", errors.New("encountered end-of-line before end of string literal")
+		}
+		l.colNo++
+		if c == quote {
+			break
+		}
+		if c == 0 {
+			return "", errors.New("null character ('\\0') not allowed in string literal")
+		}
+		if c == '\\' {
+			// escape sequence
+			c, _, err = l.input.readRune()
+			if err != nil {
+				return "", err
+			}
+			l.colNo++
+			if c == 'x' || c == 'X' {
+				// hex escape
+				c, _, err := l.input.readRune()
+				if err != nil {
+					return "", err
+				}
+				l.colNo++
+				c2, _, err := l.input.readRune()
+				if err != nil {
+					return "", err
+				}
+				var hex string
+				if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') {
+					l.input.unreadRune(c2)
+					hex = string(c)
+				} else {
+					l.colNo++
+					hex = string([]rune{c, c2})
+				}
+				i, err := strconv.ParseInt(hex, 16, 32)
+				if err != nil {
+					return "", fmt.Errorf("invalid hex escape: \\x%q", hex)
+				}
+				buf.WriteByte(byte(i))
+
+			} else if c >= '0' && c <= '7' {
+				// octal escape
+				c2, _, err := l.input.readRune()
+				if err != nil {
+					return "", err
+				}
+				var octal string
+				if c2 < '0' || c2 > '7' {
+					l.input.unreadRune(c2)
+					octal = string(c)
+				} else {
+					l.colNo++
+					c3, _, err := l.input.readRune()
+					if err != nil {
+						return "", err
+					}
+					if c3 < '0' || c3 > '7' {
+						l.input.unreadRune(c3)
+						octal = string([]rune{c, c2})
+					} else {
+						l.colNo++
+						octal = string([]rune{c, c2, c3})
+					}
+				}
+				i, err := strconv.ParseInt(octal, 8, 32)
+				if err != nil {
+					return "", fmt.Errorf("invalid octal escape: \\%q", octal)
+				}
+				if i > 0xff {
+					return "", fmt.Errorf("octal escape is out of range, must be between 0 and 377: \\%q", octal)
+				}
+				buf.WriteByte(byte(i))
+
+			} else if c == 'u' {
+				// short unicode escape
+				u := make([]rune, 4)
+				for i := range u {
+					c, _, err := l.input.readRune()
+					if err != nil {
+						return "", err
+					}
+					l.colNo++
+					u[i] = c
+				}
+				i, err := strconv.ParseInt(string(u), 16, 32)
+				if err != nil {
+					return "", fmt.Errorf("invalid unicode escape: \\u%q", string(u))
+				}
+				buf.WriteRune(rune(i))
+
+			} else if c == 'U' {
+				// long unicode escape
+				u := make([]rune, 8)
+				for i := range u {
+					c, _, err := l.input.readRune()
+					if err != nil {
+						return "", err
+					}
+					l.colNo++
+					u[i] = c
+				}
+				i, err := strconv.ParseInt(string(u), 16, 32)
+				if err != nil {
+					return "", fmt.Errorf("invalid unicode escape: \\U%q", string(u))
+				}
+				if i > 0x10ffff || i < 0 {
+					return "", fmt.Errorf("unicode escape is out of range, must be between 0 and 0x10ffff: \\U%q", string(u))
+				}
+				buf.WriteRune(rune(i))
+
+			} else if c == 'a' {
+				buf.WriteByte('\a')
+			} else if c == 'b' {
+				buf.WriteByte('\b')
+			} else if c == 'f' {
+				buf.WriteByte('\f')
+			} else if c == 'n' {
+				buf.WriteByte('\n')
+			} else if c == 'r' {
+				buf.WriteByte('\r')
+			} else if c == 't' {
+				buf.WriteByte('\t')
+			} else if c == 'v' {
+				buf.WriteByte('\v')
+			} else if c == '\\' {
+				buf.WriteByte('\\')
+			} else if c == '\'' {
+				buf.WriteByte('\'')
+			} else if c == '"' {
+				buf.WriteByte('"')
+			} else if c == '?' {
+				buf.WriteByte('?')
+			} else {
+				return "", fmt.Errorf("invalid escape sequence: %q", "\\"+string(c))
+			}
+		} else {
+			buf.WriteRune(c)
+		}
+	}
+	return buf.String(), nil
+}
+
+func (l *protoLex) skipToEndOfLineComment() (bool, string) {
+	txt := []rune{'/', '/'}
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			return false, string(txt)
+		}
+		if c == '\n' {
+			return true, string(txt)
+		}
+		l.colNo++
+		txt = append(txt, c)
+	}
+}
+
+func (l *protoLex) skipToEndOfBlockComment() (string, bool) {
+	txt := []rune{'/', '*'}
+	for {
+		c, _, err := l.input.readRune()
+		if err != nil {
+			return "", false
+		}
+		if c == '\n' {
+			l.colNo = 0
+			l.lineNo++
+		} else {
+			l.colNo++
+		}
+		txt = append(txt, c)
+		if c == '*' {
+			c, _, err := l.input.readRune()
+			if err != nil {
+				return "", false
+			}
+			if c == '/' {
+				l.colNo++
+				txt = append(txt, c)
+				return string(txt), true
+			}
+			l.input.unreadRune(c)
+		}
+	}
+}
+
+func (l *protoLex) Error(s string) {
+	if l.err == nil {
+		l.err = ErrorWithSourcePos{Underlying: errors.New(s), Pos: l.prevSym.start()}
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
new file mode 100644
index 0000000..c150936
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/linker.go
@@ -0,0 +1,652 @@
+package protoparse
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+type linker struct {
+	files          map[string]*parseResult
+	descriptorPool map[*dpb.FileDescriptorProto]map[string]proto.Message
+	extensions     map[string]map[int32]string
+}
+
+func newLinker(files map[string]*parseResult) *linker {
+	return &linker{files: files}
+}
+
+func (l *linker) linkFiles() (map[string]*desc.FileDescriptor, error) {
+	// First, we put all symbols into a single pool, which lets us ensure there
+	// are no duplicate symbols and will also let us resolve and revise all type
+	// references in the next step.
+	if err := l.createDescriptorPool(); err != nil {
+		return nil, err
+	}
+
+	// After we've populated the pool, we can now try to resolve all type
+	// references. All references must be checked for the correct type; any fields
+	// with enum types must be corrected (we parse them as if they were message
+	// references because we don't actually know whether a type is a message or
+	// an enum until link time), and references will be re-written to be
+	// fully-qualified references (i.e. starting with a dot ".").
+	if err := l.resolveReferences(); err != nil {
+		return nil, err
+	}
+
+	// Now we've validated the descriptors, so we can link them into rich
+	// descriptors. This is a little redundant since that step does similar
+	// checking of symbols. But, without breaking encapsulation (e.g. exporting
+	// a lot of fields from desc package that are currently unexported) or
+	// merging this into the same package, we can't really prevent it.
+	linked, err := l.createdLinkedDescriptors()
+	if err != nil {
+		return nil, err
+	}
+
+	// Now that we have linked descriptors, we can interpret any uninterpreted
+	// options that remain.
+	for _, r := range l.files {
+		fd := linked[r.fd.GetName()]
+		if err := interpretFileOptions(r, richFileDescriptorish{FileDescriptor: fd}); err != nil {
+			return nil, err
+		}
+	}
+
+	return linked, nil
+}
+
+func (l *linker) createDescriptorPool() error {
+	l.descriptorPool = map[*dpb.FileDescriptorProto]map[string]proto.Message{}
+	for _, r := range l.files {
+		fd := r.fd
+		pool := map[string]proto.Message{}
+		l.descriptorPool[fd] = pool
+		prefix := fd.GetPackage()
+		if prefix != "" {
+			prefix += "."
+		}
+		for _, md := range fd.MessageType {
+			if err := addMessageToPool(r, pool, prefix, md); err != nil {
+				return err
+			}
+		}
+		for _, fld := range fd.Extension {
+			if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+				return err
+			}
+		}
+		for _, ed := range fd.EnumType {
+			if err := addEnumToPool(r, pool, prefix, ed); err != nil {
+				return err
+			}
+		}
+		for _, sd := range fd.Service {
+			if err := addServiceToPool(r, pool, prefix, sd); err != nil {
+				return err
+			}
+		}
+	}
+	// try putting everything into a single pool, to ensure there are no duplicates
+	// across files (e.g. same symbol, but declared in two different files)
+	type entry struct {
+		file string
+		msg  proto.Message
+	}
+	pool := map[string]entry{}
+	for f, p := range l.descriptorPool {
+		for k, v := range p {
+			if e, ok := pool[k]; ok {
+				desc1 := e.msg
+				file1 := e.file
+				desc2 := v
+				file2 := f.GetName()
+				if file2 < file1 {
+					file1, file2 = file2, file1
+					desc1, desc2 = desc2, desc1
+				}
+				node := l.files[file2].nodes[desc2]
+				return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s in %q", k, descriptorType(desc1), file1)}
+			}
+			pool[k] = entry{file: f.GetName(), msg: v}
+		}
+	}
+
+	return nil
+}
+
+func addMessageToPool(r *parseResult, pool map[string]proto.Message, prefix string, md *dpb.DescriptorProto) error {
+	fqn := prefix + md.GetName()
+	if err := addToPool(r, pool, fqn, md); err != nil {
+		return err
+	}
+	prefix = fqn + "."
+	for _, fld := range md.Field {
+		if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Extension {
+		if err := addFieldToPool(r, pool, prefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, nmd := range md.NestedType {
+		if err := addMessageToPool(r, pool, prefix, nmd); err != nil {
+			return err
+		}
+	}
+	for _, ed := range md.EnumType {
+		if err := addEnumToPool(r, pool, prefix, ed); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addFieldToPool(r *parseResult, pool map[string]proto.Message, prefix string, fld *dpb.FieldDescriptorProto) error {
+	fqn := prefix + fld.GetName()
+	return addToPool(r, pool, fqn, fld)
+}
+
+func addEnumToPool(r *parseResult, pool map[string]proto.Message, prefix string, ed *dpb.EnumDescriptorProto) error {
+	fqn := prefix + ed.GetName()
+	if err := addToPool(r, pool, fqn, ed); err != nil {
+		return err
+	}
+	for _, evd := range ed.Value {
+		vfqn := fqn + "." + evd.GetName()
+		if err := addToPool(r, pool, vfqn, evd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addServiceToPool(r *parseResult, pool map[string]proto.Message, prefix string, sd *dpb.ServiceDescriptorProto) error {
+	fqn := prefix + sd.GetName()
+	if err := addToPool(r, pool, fqn, sd); err != nil {
+		return err
+	}
+	for _, mtd := range sd.Method {
+		mfqn := fqn + "." + mtd.GetName()
+		if err := addToPool(r, pool, mfqn, mtd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func addToPool(r *parseResult, pool map[string]proto.Message, fqn string, dsc proto.Message) error {
+	if d, ok := pool[fqn]; ok {
+		node := r.nodes[dsc]
+		return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("duplicate symbol %s: already defined as %s", fqn, descriptorType(d))}
+	}
+	pool[fqn] = dsc
+	return nil
+}
+
+func descriptorType(m proto.Message) string {
+	switch m := m.(type) {
+	case *dpb.DescriptorProto:
+		return "message"
+	case *dpb.DescriptorProto_ExtensionRange:
+		return "extension range"
+	case *dpb.FieldDescriptorProto:
+		if m.GetExtendee() == "" {
+			return "field"
+		} else {
+			return "extension"
+		}
+	case *dpb.EnumDescriptorProto:
+		return "enum"
+	case *dpb.EnumValueDescriptorProto:
+		return "enum value"
+	case *dpb.ServiceDescriptorProto:
+		return "service"
+	case *dpb.MethodDescriptorProto:
+		return "method"
+	case *dpb.FileDescriptorProto:
+		return "file"
+	default:
+		// shouldn't be possible
+		return fmt.Sprintf("%T", m)
+	}
+}
+
+func (l *linker) resolveReferences() error {
+	l.extensions = map[string]map[int32]string{}
+	for _, r := range l.files {
+		fd := r.fd
+		prefix := fd.GetPackage()
+		scopes := []scope{fileScope(fd, l)}
+		if prefix != "" {
+			prefix += "."
+		}
+		if fd.Options != nil {
+			if err := l.resolveOptions(r, fd, "file", fd.GetName(), proto.MessageName(fd.Options), fd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+		for _, md := range fd.MessageType {
+			if err := l.resolveMessageTypes(r, fd, prefix, md, scopes); err != nil {
+				return err
+			}
+		}
+		for _, fld := range fd.Extension {
+			if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+				return err
+			}
+		}
+		for _, ed := range fd.EnumType {
+			if err := l.resolveEnumTypes(r, fd, prefix, ed, scopes); err != nil {
+				return err
+			}
+		}
+		for _, sd := range fd.Service {
+			if err := l.resolveServiceTypes(r, fd, prefix, sd, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (l *linker) resolveEnumTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, ed *dpb.EnumDescriptorProto, scopes []scope) error {
+	enumFqn := prefix + ed.GetName()
+	if ed.Options != nil {
+		if err := l.resolveOptions(r, fd, "enum", enumFqn, proto.MessageName(ed.Options), ed.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+	for _, evd := range ed.Value {
+		if evd.Options != nil {
+			evFqn := enumFqn + "." + evd.GetName()
+			if err := l.resolveOptions(r, fd, "enum value", evFqn, proto.MessageName(evd.Options), evd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (l *linker) resolveMessageTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, md *dpb.DescriptorProto, scopes []scope) error {
+	fqn := prefix + md.GetName()
+	scope := messageScope(fqn, isProto3(fd), l.descriptorPool[fd])
+	scopes = append(scopes, scope)
+	prefix = fqn + "."
+
+	if md.Options != nil {
+		if err := l.resolveOptions(r, fd, "message", fqn, proto.MessageName(md.Options), md.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	for _, nmd := range md.NestedType {
+		if err := l.resolveMessageTypes(r, fd, prefix, nmd, scopes); err != nil {
+			return err
+		}
+	}
+	for _, ned := range md.EnumType {
+		if err := l.resolveEnumTypes(r, fd, prefix, ned, scopes); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Field {
+		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Extension {
+		if err := l.resolveFieldTypes(r, fd, prefix, fld, scopes); err != nil {
+			return err
+		}
+	}
+	for _, er := range md.ExtensionRange {
+		if er.Options != nil {
+			erName := fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1)
+			if err := l.resolveOptions(r, fd, "extension range", erName, proto.MessageName(er.Options), er.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (l *linker) resolveFieldTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, fld *dpb.FieldDescriptorProto, scopes []scope) error {
+	thisName := prefix + fld.GetName()
+	scope := fmt.Sprintf("field %s", thisName)
+	node := r.getFieldNode(fld)
+	elemType := "field"
+	if fld.GetExtendee() != "" {
+		fqn, dsc, _ := l.resolve(fd, fld.GetExtendee(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("unknown extendee type %s", fld.GetExtendee())}
+		}
+		extd, ok := dsc.(*dpb.DescriptorProto)
+		if !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.fieldExtendee().start(), Underlying: fmt.Errorf("extendee is invalid: %s is a %s, not a message", fqn, otherType)}
+		}
+		fld.Extendee = proto.String("." + fqn)
+		// make sure the tag number is in range
+		found := false
+		tag := fld.GetNumber()
+		for _, rng := range extd.ExtensionRange {
+			if tag >= rng.GetStart() && tag < rng.GetEnd() {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: tag %d is not in valid range for extended type %s", scope, tag, fqn)}
+		}
+		// make sure tag is not a duplicate
+		usedExtTags := l.extensions[fqn]
+		if usedExtTags == nil {
+			usedExtTags = map[int32]string{}
+			l.extensions[fqn] = usedExtTags
+		}
+		if other := usedExtTags[fld.GetNumber()]; other != "" {
+			return ErrorWithSourcePos{Pos: node.fieldTag().start(), Underlying: fmt.Errorf("%s: duplicate extension: %s and %s are both using tag %d", scope, other, thisName, fld.GetNumber())}
+		}
+		usedExtTags[fld.GetNumber()] = thisName
+		elemType = "extension"
+	}
+
+	if fld.Options != nil {
+		if err := l.resolveOptions(r, fd, elemType, thisName, proto.MessageName(fld.Options), fld.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	if fld.GetTypeName() == "" {
+		// scalar type; no further resolution required
+		return nil
+	}
+
+	fqn, dsc, proto3 := l.resolve(fd, fld.GetTypeName(), isType, scopes)
+	if dsc == nil {
+		return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: unknown type %s", scope, fld.GetTypeName())}
+	}
+	switch dsc := dsc.(type) {
+	case *dpb.DescriptorProto:
+		fld.TypeName = proto.String("." + fqn)
+	case *dpb.EnumDescriptorProto:
+		if fld.GetExtendee() == "" && isProto3(fd) && !proto3 {
+			// fields in a proto3 message cannot refer to proto2 enums
+			return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: cannot use proto2 enum %s in a proto3 message", scope, fld.GetTypeName())}
+		}
+		fld.TypeName = proto.String("." + fqn)
+		// the type was tentatively set to message, but now we know it's actually an enum
+		fld.Type = dpb.FieldDescriptorProto_TYPE_ENUM.Enum()
+	default:
+		otherType := descriptorType(dsc)
+		return ErrorWithSourcePos{Pos: node.fieldType().start(), Underlying: fmt.Errorf("%s: invalid type: %s is a %s, not a message or enum", scope, fqn, otherType)}
+	}
+	return nil
+}
+
+func (l *linker) resolveServiceTypes(r *parseResult, fd *dpb.FileDescriptorProto, prefix string, sd *dpb.ServiceDescriptorProto, scopes []scope) error {
+	thisName := prefix + sd.GetName()
+	if sd.Options != nil {
+		if err := l.resolveOptions(r, fd, "service", thisName, proto.MessageName(sd.Options), sd.Options.UninterpretedOption, scopes); err != nil {
+			return err
+		}
+	}
+
+	for _, mtd := range sd.Method {
+		if mtd.Options != nil {
+			if err := l.resolveOptions(r, fd, "method", thisName+"."+mtd.GetName(), proto.MessageName(mtd.Options), mtd.Options.UninterpretedOption, scopes); err != nil {
+				return err
+			}
+		}
+		scope := fmt.Sprintf("method %s.%s", thisName, mtd.GetName())
+		node := r.getMethodNode(mtd)
+		fqn, dsc, _ := l.resolve(fd, mtd.GetInputType(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: unknown request type %s", scope, mtd.GetInputType())}
+		}
+		if _, ok := dsc.(*dpb.DescriptorProto); !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.getInputType().start(), Underlying: fmt.Errorf("%s: invalid request type: %s is a %s, not a message", scope, fqn, otherType)}
+		}
+		mtd.InputType = proto.String("." + fqn)
+
+		fqn, dsc, _ = l.resolve(fd, mtd.GetOutputType(), isMessage, scopes)
+		if dsc == nil {
+			return ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: unknown response type %s", scope, mtd.GetOutputType())}
+		}
+		if _, ok := dsc.(*dpb.DescriptorProto); !ok {
+			otherType := descriptorType(dsc)
+			return ErrorWithSourcePos{Pos: node.getOutputType().start(), Underlying: fmt.Errorf("%s: invalid response type: %s is a %s, not a message", scope, fqn, otherType)}
+		}
+		mtd.OutputType = proto.String("." + fqn)
+	}
+	return nil
+}
+
+func (l *linker) resolveOptions(r *parseResult, fd *dpb.FileDescriptorProto, elemType, elemName, optType string, opts []*dpb.UninterpretedOption, scopes []scope) error {
+	var scope string
+	if elemType != "file" {
+		scope = fmt.Sprintf("%s %s: ", elemType, elemName)
+	}
+	for _, opt := range opts {
+		for _, nm := range opt.Name {
+			if nm.GetIsExtension() {
+				node := r.getOptionNamePartNode(nm)
+				fqn, dsc, _ := l.resolve(fd, nm.GetNamePart(), isField, scopes)
+				if dsc == nil {
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sunknown extension %s", scope, nm.GetNamePart())}
+				}
+				if ext, ok := dsc.(*dpb.FieldDescriptorProto); !ok {
+					otherType := descriptorType(dsc)
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a %s, not an extension", scope, nm.GetNamePart(), otherType)}
+				} else if ext.GetExtendee() == "" {
+					return ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("%sinvalid extension: %s is a field but not an extension", scope, nm.GetNamePart())}
+				}
+				nm.NamePart = proto.String("." + fqn)
+			}
+		}
+	}
+	return nil
+}
+
+func (l *linker) resolve(fd *dpb.FileDescriptorProto, name string, allowed func(proto.Message) bool, scopes []scope) (fqn string, element proto.Message, proto3 bool) {
+	if strings.HasPrefix(name, ".") {
+		// already fully-qualified
+		d, proto3 := l.findSymbol(fd, name[1:], false, map[*dpb.FileDescriptorProto]struct{}{})
+		if d != nil {
+			return name[1:], d, proto3
+		}
+	} else {
+		// unqualified, so we look in the enclosing (last) scope first and move
+		// towards outermost (first) scope, trying to resolve the symbol
+		var bestGuess proto.Message
+		var bestGuessFqn string
+		var bestGuessProto3 bool
+		for i := len(scopes) - 1; i >= 0; i-- {
+			fqn, d, proto3 := scopes[i](name)
+			if d != nil {
+				if allowed(d) {
+					return fqn, d, proto3
+				} else if bestGuess == nil {
+					bestGuess = d
+					bestGuessFqn = fqn
+					bestGuessProto3 = proto3
+				}
+			}
+		}
+		// we return the best guess, even though it was not an allowed kind of
+		// descriptor, so the caller can print a better error message (e.g.
+		// indicating that the name was found but that it's the wrong type)
+		return bestGuessFqn, bestGuess, bestGuessProto3
+	}
+	return "", nil, false
+}
+
+func isField(m proto.Message) bool {
+	_, ok := m.(*dpb.FieldDescriptorProto)
+	return ok
+}
+
+func isMessage(m proto.Message) bool {
+	_, ok := m.(*dpb.DescriptorProto)
+	return ok
+}
+
+func isType(m proto.Message) bool {
+	switch m.(type) {
+	case *dpb.DescriptorProto, *dpb.EnumDescriptorProto:
+		return true
+	}
+	return false
+}
+
+// scope represents a lexical scope in a proto file in which messages and enums
+// can be declared.
+type scope func(symbol string) (fqn string, element proto.Message, proto3 bool)
+
+func fileScope(fd *dpb.FileDescriptorProto, l *linker) scope {
+	// we search symbols in this file, but also symbols in other files that have
+	// the same package as this file or a "parent" package (in protobuf,
+	// packages are a hierarchy like C++ namespaces)
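+	//
+	// For example (illustrative only): for a file with package "foo.bar", the
+	// prefix list is expected to be ["foo.bar", "foo", ""], so an unqualified
+	// name "Baz" is tried as "foo.bar.Baz", then "foo.Baz", then plain "Baz".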
+	prefixes := internal.CreatePrefixList(fd.GetPackage())
+	return func(name string) (string, proto.Message, bool) {
+		for _, prefix := range prefixes {
+			var n string
+			if prefix == "" {
+				n = name
+			} else {
+				n = prefix + "." + name
+			}
+			d, proto3 := l.findSymbol(fd, n, false, map[*dpb.FileDescriptorProto]struct{}{})
+			if d != nil {
+				return n, d, proto3
+			}
+		}
+		return "", nil, false
+	}
+}
+
+func messageScope(messageName string, proto3 bool, filePool map[string]proto.Message) scope {
+	return func(name string) (string, proto.Message, bool) {
+		n := messageName + "." + name
+		if d, ok := filePool[n]; ok {
+			return n, d, proto3
+		}
+		return "", nil, false
+	}
+}
+
+func (l *linker) findSymbol(fd *dpb.FileDescriptorProto, name string, public bool, checked map[*dpb.FileDescriptorProto]struct{}) (element proto.Message, proto3 bool) {
+	if _, ok := checked[fd]; ok {
+		// already checked this one
+		return nil, false
+	}
+	checked[fd] = struct{}{}
+	d := l.descriptorPool[fd][name]
+	if d != nil {
+		return d, isProto3(fd)
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
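+	// For example, if this file imports "b.proto" and "b.proto" publicly
+	// imports "c.proto", then symbols defined in "c.proto" must be visible
+	// here, so the search must follow public imports transitively.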
+	if public {
+		for _, depIndex := range fd.PublicDependency {
+			dep := fd.Dependency[depIndex]
+			depres := l.files[dep]
+			if depres == nil {
+				// we'll catch this error later
+				continue
+			}
+			if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+				return d, proto3
+			}
+		}
+	} else {
+		for _, dep := range fd.Dependency {
+			depres := l.files[dep]
+			if depres == nil {
+				// we'll catch this error later
+				continue
+			}
+			if d, proto3 := l.findSymbol(depres.fd, name, true, checked); d != nil {
+				return d, proto3
+			}
+		}
+	}
+
+	return nil, false
+}
+
+func isProto3(fd *dpb.FileDescriptorProto) bool {
+	return fd.GetSyntax() == "proto3"
+}
+
+func (l *linker) createdLinkedDescriptors() (map[string]*desc.FileDescriptor, error) {
+	names := make([]string, 0, len(l.files))
+	for name := range l.files {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+	linked := map[string]*desc.FileDescriptor{}
+	for _, name := range names {
+		if _, err := l.linkFile(name, nil, linked); err != nil {
+			return nil, err
+		}
+	}
+	return linked, nil
+}
+
+func (l *linker) linkFile(name string, seen []string, linked map[string]*desc.FileDescriptor) (*desc.FileDescriptor, error) {
+	// check for import cycle
+	for _, s := range seen {
+		if name == s {
+			var msg bytes.Buffer
+			first := true
+			for _, s := range seen {
+				if first {
+					first = false
+				} else {
+					msg.WriteString(" -> ")
+				}
+				fmt.Fprintf(&msg, "%q", s)
+			}
+			fmt.Fprintf(&msg, " -> %q", name)
+			return nil, fmt.Errorf("cycle found in imports: %s", msg.String())
+		}
+	}
+	seen = append(seen, name)
+
+	if lfd, ok := linked[name]; ok {
+		// already linked
+		return lfd, nil
+	}
+	r := l.files[name]
+	if r == nil {
+		importer := seen[len(seen)-2] // len-1 is *this* file, before that is the one that imported it
+		return nil, fmt.Errorf("no descriptor found for %q, imported by %q", name, importer)
+	}
+	var deps []*desc.FileDescriptor
+	for _, dep := range r.fd.Dependency {
+		ldep, err := l.linkFile(dep, seen, linked)
+		if err != nil {
+			return nil, err
+		}
+		deps = append(deps, ldep)
+	}
+	lfd, err := desc.CreateFileDescriptor(r.fd, deps...)
+	if err != nil {
+		return nil, fmt.Errorf("error linking %q: %s", name, err)
+	}
+	linked[name] = lfd
+	return lfd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
new file mode 100644
index 0000000..be287f6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/options.go
@@ -0,0 +1,1405 @@
+package protoparse
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/internal"
+	"github.com/jhump/protoreflect/dynamic"
+)
+
+// NB: To process options, we need descriptors, but we may not have rich
+// descriptors when trying to interpret options for unlinked parsed files.
+// So we define minimal interfaces that can be backed by both rich descriptors
+// as well as their poorer cousins, plain ol' descriptor protos.
+
+type descriptorish interface {
+	GetFile() fileDescriptorish
+	GetFullyQualifiedName() string
+	AsProto() proto.Message
+}
+
+type fileDescriptorish interface {
+	descriptorish
+	GetFileOptions() *dpb.FileOptions
+	GetPackage() string
+	FindSymbol(name string) desc.Descriptor
+	GetPublicDependencies() []fileDescriptorish
+	GetDependencies() []fileDescriptorish
+	GetMessageTypes() []msgDescriptorish
+	GetExtensions() []fldDescriptorish
+	GetEnumTypes() []enumDescriptorish
+	GetServices() []svcDescriptorish
+}
+
+type msgDescriptorish interface {
+	descriptorish
+	GetMessageOptions() *dpb.MessageOptions
+	GetFields() []fldDescriptorish
+	GetOneOfs() []oneofDescriptorish
+	GetExtensionRanges() []extRangeDescriptorish
+	GetNestedMessageTypes() []msgDescriptorish
+	GetNestedExtensions() []fldDescriptorish
+	GetNestedEnumTypes() []enumDescriptorish
+}
+
+type fldDescriptorish interface {
+	descriptorish
+	GetFieldOptions() *dpb.FieldOptions
+	GetMessageType() *desc.MessageDescriptor
+	GetEnumType() *desc.EnumDescriptor
+	AsFieldDescriptorProto() *dpb.FieldDescriptorProto
+}
+
+type oneofDescriptorish interface {
+	descriptorish
+	GetOneOfOptions() *dpb.OneofOptions
+}
+
+type enumDescriptorish interface {
+	descriptorish
+	GetEnumOptions() *dpb.EnumOptions
+	GetValues() []enumValDescriptorish
+}
+
+type enumValDescriptorish interface {
+	descriptorish
+	GetEnumValueOptions() *dpb.EnumValueOptions
+}
+
+type svcDescriptorish interface {
+	descriptorish
+	GetServiceOptions() *dpb.ServiceOptions
+	GetMethods() []methodDescriptorish
+}
+
+type methodDescriptorish interface {
+	descriptorish
+	GetMethodOptions() *dpb.MethodOptions
+}
+
+// The hierarchy of descriptorish implementations backed by
+// rich descriptors:
+
+type richFileDescriptorish struct {
+	*desc.FileDescriptor
+}
+
+func (d richFileDescriptorish) GetFile() fileDescriptorish {
+	return d
+}
+
+func (d richFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
+	deps := d.FileDescriptor.GetPublicDependencies()
+	ret := make([]fileDescriptorish, len(deps))
+	for i, d := range deps {
+		ret[i] = richFileDescriptorish{FileDescriptor: d}
+	}
+	return ret
+}
+
+func (d richFileDescriptorish) GetDependencies() []fileDescriptorish {
+	deps := d.FileDescriptor.GetDependencies()
+	ret := make([]fileDescriptorish, len(deps))
+	for i, d := range deps {
+		ret[i] = richFileDescriptorish{FileDescriptor: d}
+	}
+	return ret
+}
+
+func (d richFileDescriptorish) GetMessageTypes() []msgDescriptorish {
+	msgs := d.FileDescriptor.GetMessageTypes()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
+	}
+	return ret
+}
+
+func (d richFileDescriptorish) GetExtensions() []fldDescriptorish {
+	flds := d.FileDescriptor.GetExtensions()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+func (d richFileDescriptorish) GetEnumTypes() []enumDescriptorish {
+	ens := d.FileDescriptor.GetEnumTypes()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
+	}
+	return ret
+}
+
+func (d richFileDescriptorish) GetServices() []svcDescriptorish {
+	svcs := d.FileDescriptor.GetServices()
+	ret := make([]svcDescriptorish, len(svcs))
+	for i, s := range svcs {
+		ret[i] = richSvcDescriptorish{ServiceDescriptor: s}
+	}
+	return ret
+}
+
+type richMsgDescriptorish struct {
+	*desc.MessageDescriptor
+}
+
+func (d richMsgDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.MessageDescriptor.GetFile()}
+}
+
+func (d richMsgDescriptorish) GetFields() []fldDescriptorish {
+	flds := d.MessageDescriptor.GetFields()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+func (d richMsgDescriptorish) GetOneOfs() []oneofDescriptorish {
+	oos := d.MessageDescriptor.GetOneOfs()
+	ret := make([]oneofDescriptorish, len(oos))
+	for i, oo := range oos {
+		ret[i] = richOneOfDescriptorish{OneOfDescriptor: oo}
+	}
+	return ret
+}
+
+func (d richMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish {
+	md := d.MessageDescriptor
+	mdFqn := md.GetFullyQualifiedName()
+	extrs := md.AsDescriptorProto().GetExtensionRange()
+	ret := make([]extRangeDescriptorish, len(extrs))
+	for i, extr := range extrs {
+		ret[i] = extRangeDescriptorish{
+			er:   extr,
+			qual: mdFqn,
+			file: richFileDescriptorish{FileDescriptor: md.GetFile()},
+		}
+	}
+	return ret
+}
+
+func (d richMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish {
+	msgs := d.MessageDescriptor.GetNestedMessageTypes()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = richMsgDescriptorish{MessageDescriptor: m}
+	}
+	return ret
+}
+
+func (d richMsgDescriptorish) GetNestedExtensions() []fldDescriptorish {
+	flds := d.MessageDescriptor.GetNestedExtensions()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = richFldDescriptorish{FieldDescriptor: f}
+	}
+	return ret
+}
+
+func (d richMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish {
+	ens := d.MessageDescriptor.GetNestedEnumTypes()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = richEnumDescriptorish{EnumDescriptor: en}
+	}
+	return ret
+}
+
+type richFldDescriptorish struct {
+	*desc.FieldDescriptor
+}
+
+func (d richFldDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.FieldDescriptor.GetFile()}
+}
+
+func (d richFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return d.FieldDescriptor.AsFieldDescriptorProto()
+}
+
+type richOneOfDescriptorish struct {
+	*desc.OneOfDescriptor
+}
+
+func (d richOneOfDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.OneOfDescriptor.GetFile()}
+}
+
+type richEnumDescriptorish struct {
+	*desc.EnumDescriptor
+}
+
+func (d richEnumDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.EnumDescriptor.GetFile()}
+}
+
+func (d richEnumDescriptorish) GetValues() []enumValDescriptorish {
+	vals := d.EnumDescriptor.GetValues()
+	ret := make([]enumValDescriptorish, len(vals))
+	for i, val := range vals {
+		ret[i] = richEnumValDescriptorish{EnumValueDescriptor: val}
+	}
+	return ret
+}
+
+type richEnumValDescriptorish struct {
+	*desc.EnumValueDescriptor
+}
+
+func (d richEnumValDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.EnumValueDescriptor.GetFile()}
+}
+
+type richSvcDescriptorish struct {
+	*desc.ServiceDescriptor
+}
+
+func (d richSvcDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.ServiceDescriptor.GetFile()}
+}
+
+func (d richSvcDescriptorish) GetMethods() []methodDescriptorish {
+	mtds := d.ServiceDescriptor.GetMethods()
+	ret := make([]methodDescriptorish, len(mtds))
+	for i, mtd := range mtds {
+		ret[i] = richMethodDescriptorish{MethodDescriptor: mtd}
+	}
+	return ret
+}
+
+type richMethodDescriptorish struct {
+	*desc.MethodDescriptor
+}
+
+func (d richMethodDescriptorish) GetFile() fileDescriptorish {
+	return richFileDescriptorish{FileDescriptor: d.MethodDescriptor.GetFile()}
+}
+
+// The hierarchy of descriptorish implementations backed by
+// plain descriptor protos:
+
+type poorFileDescriptorish struct {
+	*dpb.FileDescriptorProto
+}
+
+func (d poorFileDescriptorish) GetFile() fileDescriptorish {
+	return d
+}
+
+func (d poorFileDescriptorish) GetFullyQualifiedName() string {
+	return d.FileDescriptorProto.GetName()
+}
+
+func (d poorFileDescriptorish) AsProto() proto.Message {
+	return d.FileDescriptorProto
+}
+
+func (d poorFileDescriptorish) GetFileOptions() *dpb.FileOptions {
+	return d.FileDescriptorProto.GetOptions()
+}
+
+func (d poorFileDescriptorish) FindSymbol(name string) desc.Descriptor {
+	return nil
+}
+
+func (d poorFileDescriptorish) GetPublicDependencies() []fileDescriptorish {
+	return nil
+}
+
+func (d poorFileDescriptorish) GetDependencies() []fileDescriptorish {
+	return nil
+}
+
+func (d poorFileDescriptorish) GetMessageTypes() []msgDescriptorish {
+	msgs := d.FileDescriptorProto.GetMessageType()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = poorMsgDescriptorish{
+			DescriptorProto: m,
+			qual:            pkg,
+			file:            d,
+		}
+	}
+	return ret
+}
+
+func (d poorFileDescriptorish) GetExtensions() []fldDescriptorish {
+	exts := d.FileDescriptorProto.GetExtension()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]fldDescriptorish, len(exts))
+	for i, e := range exts {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: e,
+			qual:                 pkg,
+			file:                 d,
+		}
+	}
+	return ret
+}
+
+func (d poorFileDescriptorish) GetEnumTypes() []enumDescriptorish {
+	ens := d.FileDescriptorProto.GetEnumType()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, e := range ens {
+		ret[i] = poorEnumDescriptorish{
+			EnumDescriptorProto: e,
+			qual:                pkg,
+			file:                d,
+		}
+	}
+	return ret
+}
+
+func (d poorFileDescriptorish) GetServices() []svcDescriptorish {
+	svcs := d.FileDescriptorProto.GetService()
+	pkg := d.FileDescriptorProto.GetPackage()
+	ret := make([]svcDescriptorish, len(svcs))
+	for i, s := range svcs {
+		ret[i] = poorSvcDescriptorish{
+			ServiceDescriptorProto: s,
+			qual:                   pkg,
+			file:                   d,
+		}
+	}
+	return ret
+}
+
+type poorMsgDescriptorish struct {
+	*dpb.DescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorMsgDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorMsgDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.DescriptorProto.GetName())
+}
+
+func qualify(qual, name string) string {
+	if qual == "" {
+		return name
+	} else {
+		return fmt.Sprintf("%s.%s", qual, name)
+	}
+}
+
+func (d poorMsgDescriptorish) AsProto() proto.Message {
+	return d.DescriptorProto
+}
+
+func (d poorMsgDescriptorish) GetMessageOptions() *dpb.MessageOptions {
+	return d.DescriptorProto.GetOptions()
+}
+
+func (d poorMsgDescriptorish) GetFields() []fldDescriptorish {
+	flds := d.DescriptorProto.GetField()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: f,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+func (d poorMsgDescriptorish) GetOneOfs() []oneofDescriptorish {
+	oos := d.DescriptorProto.GetOneofDecl()
+	ret := make([]oneofDescriptorish, len(oos))
+	for i, oo := range oos {
+		ret[i] = poorOneOfDescriptorish{
+			OneofDescriptorProto: oo,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+func (d poorMsgDescriptorish) GetExtensionRanges() []extRangeDescriptorish {
+	mdFqn := d.GetFullyQualifiedName()
+	extrs := d.DescriptorProto.GetExtensionRange()
+	ret := make([]extRangeDescriptorish, len(extrs))
+	for i, extr := range extrs {
+		ret[i] = extRangeDescriptorish{
+			er:   extr,
+			qual: mdFqn,
+			file: d.file,
+		}
+	}
+	return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedMessageTypes() []msgDescriptorish {
+	msgs := d.DescriptorProto.GetNestedType()
+	ret := make([]msgDescriptorish, len(msgs))
+	for i, m := range msgs {
+		ret[i] = poorMsgDescriptorish{
+			DescriptorProto: m,
+			qual:            d.GetFullyQualifiedName(),
+			file:            d.file,
+		}
+	}
+	return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedExtensions() []fldDescriptorish {
+	flds := d.DescriptorProto.GetExtension()
+	ret := make([]fldDescriptorish, len(flds))
+	for i, f := range flds {
+		ret[i] = poorFldDescriptorish{
+			FieldDescriptorProto: f,
+			qual:                 d.GetFullyQualifiedName(),
+			file:                 d.file,
+		}
+	}
+	return ret
+}
+
+func (d poorMsgDescriptorish) GetNestedEnumTypes() []enumDescriptorish {
+	ens := d.DescriptorProto.GetEnumType()
+	ret := make([]enumDescriptorish, len(ens))
+	for i, en := range ens {
+		ret[i] = poorEnumDescriptorish{
+			EnumDescriptorProto: en,
+			qual:                d.GetFullyQualifiedName(),
+			file:                d.file,
+		}
+	}
+	return ret
+}
+
+type poorFldDescriptorish struct {
+	*dpb.FieldDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorFldDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorFldDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.FieldDescriptorProto.GetName())
+}
+
+func (d poorFldDescriptorish) AsProto() proto.Message {
+	return d.FieldDescriptorProto
+}
+
+func (d poorFldDescriptorish) GetFieldOptions() *dpb.FieldOptions {
+	return d.FieldDescriptorProto.GetOptions()
+}
+
+func (d poorFldDescriptorish) GetMessageType() *desc.MessageDescriptor {
+	return nil
+}
+
+func (d poorFldDescriptorish) GetEnumType() *desc.EnumDescriptor {
+	return nil
+}
+
+type poorOneOfDescriptorish struct {
+	*dpb.OneofDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorOneOfDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorOneOfDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.OneofDescriptorProto.GetName())
+}
+
+func (d poorOneOfDescriptorish) AsProto() proto.Message {
+	return d.OneofDescriptorProto
+}
+
+func (d poorOneOfDescriptorish) GetOneOfOptions() *dpb.OneofOptions {
+	return d.OneofDescriptorProto.GetOptions()
+}
+
+func (d poorFldDescriptorish) AsFieldDescriptorProto() *dpb.FieldDescriptorProto {
+	return d.FieldDescriptorProto
+}
+
+type poorEnumDescriptorish struct {
+	*dpb.EnumDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorEnumDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorEnumDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.EnumDescriptorProto.GetName())
+}
+
+func (d poorEnumDescriptorish) AsProto() proto.Message {
+	return d.EnumDescriptorProto
+}
+
+func (d poorEnumDescriptorish) GetEnumOptions() *dpb.EnumOptions {
+	return d.EnumDescriptorProto.GetOptions()
+}
+
+func (d poorEnumDescriptorish) GetValues() []enumValDescriptorish {
+	vals := d.EnumDescriptorProto.GetValue()
+	ret := make([]enumValDescriptorish, len(vals))
+	for i, v := range vals {
+		ret[i] = poorEnumValDescriptorish{
+			EnumValueDescriptorProto: v,
+			qual:                     d.GetFullyQualifiedName(),
+			file:                     d.file,
+		}
+	}
+	return ret
+}
+
+type poorEnumValDescriptorish struct {
+	*dpb.EnumValueDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorEnumValDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorEnumValDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.EnumValueDescriptorProto.GetName())
+}
+
+func (d poorEnumValDescriptorish) AsProto() proto.Message {
+	return d.EnumValueDescriptorProto
+}
+
+func (d poorEnumValDescriptorish) GetEnumValueOptions() *dpb.EnumValueOptions {
+	return d.EnumValueDescriptorProto.GetOptions()
+}
+
+type poorSvcDescriptorish struct {
+	*dpb.ServiceDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorSvcDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorSvcDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.ServiceDescriptorProto.GetName())
+}
+
+func (d poorSvcDescriptorish) AsProto() proto.Message {
+	return d.ServiceDescriptorProto
+}
+
+func (d poorSvcDescriptorish) GetServiceOptions() *dpb.ServiceOptions {
+	return d.ServiceDescriptorProto.GetOptions()
+}
+
+func (d poorSvcDescriptorish) GetMethods() []methodDescriptorish {
+	mtds := d.ServiceDescriptorProto.GetMethod()
+	ret := make([]methodDescriptorish, len(mtds))
+	for i, m := range mtds {
+		ret[i] = poorMethodDescriptorish{
+			MethodDescriptorProto: m,
+			qual:                  d.GetFullyQualifiedName(),
+			file:                  d.file,
+		}
+	}
+	return ret
+}
+
+type poorMethodDescriptorish struct {
+	*dpb.MethodDescriptorProto
+	qual string
+	file fileDescriptorish
+}
+
+func (d poorMethodDescriptorish) GetFile() fileDescriptorish {
+	return d.file
+}
+
+func (d poorMethodDescriptorish) GetFullyQualifiedName() string {
+	return qualify(d.qual, d.MethodDescriptorProto.GetName())
+}
+
+func (d poorMethodDescriptorish) AsProto() proto.Message {
+	return d.MethodDescriptorProto
+}
+
+func (d poorMethodDescriptorish) GetMethodOptions() *dpb.MethodOptions {
+	return d.MethodDescriptorProto.GetOptions()
+}
+
+type extRangeDescriptorish struct {
+	er   *dpb.DescriptorProto_ExtensionRange
+	qual string
+	file fileDescriptorish
+}
+
+func (er extRangeDescriptorish) GetFile() fileDescriptorish {
+	return er.file
+}
+
+func (er extRangeDescriptorish) GetFullyQualifiedName() string {
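+	// NB: extension range ends are exclusive in descriptor protos, so end-1
+	// is used to display an inclusive upper bound.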
+	return qualify(er.qual, fmt.Sprintf("%d-%d", er.er.GetStart(), er.er.GetEnd()-1))
+}
+
+func (er extRangeDescriptorish) AsProto() proto.Message {
+	return er.er
+}
+
+func (er extRangeDescriptorish) GetExtensionRangeOptions() *dpb.ExtensionRangeOptions {
+	return er.er.GetOptions()
+}
+
+func interpretFileOptions(r *parseResult, fd fileDescriptorish) error {
+	opts := fd.GetFileOptions()
+	if opts != nil {
+		if len(opts.UninterpretedOption) > 0 {
+			if remain, err := interpretOptions(r, fd, opts, opts.UninterpretedOption); err != nil {
+				return err
+			} else {
+				opts.UninterpretedOption = remain
+			}
+		}
+	}
+	for _, md := range fd.GetMessageTypes() {
+		if err := interpretMessageOptions(r, md); err != nil {
+			return err
+		}
+	}
+	for _, fld := range fd.GetExtensions() {
+		if err := interpretFieldOptions(r, fld); err != nil {
+			return err
+		}
+	}
+	for _, ed := range fd.GetEnumTypes() {
+		if err := interpretEnumOptions(r, ed); err != nil {
+			return err
+		}
+	}
+	for _, sd := range fd.GetServices() {
+		opts := sd.GetServiceOptions()
+		if opts != nil {
+			if len(opts.UninterpretedOption) > 0 {
+				if remain, err := interpretOptions(r, sd, opts, opts.UninterpretedOption); err != nil {
+					return err
+				} else {
+					opts.UninterpretedOption = remain
+				}
+			}
+		}
+		for _, mtd := range sd.GetMethods() {
+			opts := mtd.GetMethodOptions()
+			if opts != nil {
+				if len(opts.UninterpretedOption) > 0 {
+					if remain, err := interpretOptions(r, mtd, opts, opts.UninterpretedOption); err != nil {
+						return err
+					} else {
+						opts.UninterpretedOption = remain
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func interpretMessageOptions(r *parseResult, md msgDescriptorish) error {
+	opts := md.GetMessageOptions()
+	if opts != nil {
+		if len(opts.UninterpretedOption) > 0 {
+			if remain, err := interpretOptions(r, md, opts, opts.UninterpretedOption); err != nil {
+				return err
+			} else {
+				opts.UninterpretedOption = remain
+			}
+		}
+	}
+	for _, fld := range md.GetFields() {
+		if err := interpretFieldOptions(r, fld); err != nil {
+			return err
+		}
+	}
+	for _, ood := range md.GetOneOfs() {
+		opts := ood.GetOneOfOptions()
+		if opts != nil {
+			if len(opts.UninterpretedOption) > 0 {
+				if remain, err := interpretOptions(r, ood, opts, opts.UninterpretedOption); err != nil {
+					return err
+				} else {
+					opts.UninterpretedOption = remain
+				}
+			}
+		}
+	}
+	for _, fld := range md.GetNestedExtensions() {
+		if err := interpretFieldOptions(r, fld); err != nil {
+			return err
+		}
+	}
+	for _, er := range md.GetExtensionRanges() {
+		opts := er.GetExtensionRangeOptions()
+		if opts != nil {
+			if len(opts.UninterpretedOption) > 0 {
+				if remain, err := interpretOptions(r, er, opts, opts.UninterpretedOption); err != nil {
+					return err
+				} else {
+					opts.UninterpretedOption = remain
+				}
+			}
+		}
+	}
+	for _, nmd := range md.GetNestedMessageTypes() {
+		if err := interpretMessageOptions(r, nmd); err != nil {
+			return err
+		}
+	}
+	for _, ed := range md.GetNestedEnumTypes() {
+		if err := interpretEnumOptions(r, ed); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func interpretFieldOptions(r *parseResult, fld fldDescriptorish) error {
+	opts := fld.GetFieldOptions()
+	if opts != nil {
+		if len(opts.UninterpretedOption) > 0 {
+			uo := opts.UninterpretedOption
+			scope := fmt.Sprintf("field %s", fld.GetFullyQualifiedName())
+
+			// process json_name pseudo-option
+			if index, err := findOption(r, scope, uo, "json_name"); err != nil && !r.lenient {
+				return err
+			} else if err == nil && index >= 0 {
+				opt := uo[index]
+				optNode := r.getOptionNode(opt)
+
+				// attribute source code info
+				if on, ok := optNode.(*optionNode); ok {
+					r.interpretedOptions[on] = []int32{-1, internal.Field_jsonNameTag}
+				}
+				uo = removeOption(uo, index)
+				if opt.StringValue == nil {
+					return ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting string value for json_name option", scope)}
+				}
+				fld.AsFieldDescriptorProto().JsonName = proto.String(string(opt.StringValue))
+			}
+
+			// and process default pseudo-option
+			if index, err := processDefaultOption(r, scope, fld, uo); err != nil && !r.lenient {
+				return err
+			} else if err == nil && index >= 0 {
+				// attribute source code info
+				optNode := r.getOptionNode(uo[index])
+				if on, ok := optNode.(*optionNode); ok {
+					r.interpretedOptions[on] = []int32{-1, internal.Field_defaultTag}
+				}
+				uo = removeOption(uo, index)
+			}
+
+			if len(uo) == 0 {
+				// no real options, only pseudo-options above? clear out options
+				fld.AsFieldDescriptorProto().Options = nil
+			} else if remain, err := interpretOptions(r, fld, opts, uo); err != nil {
+				return err
+			} else {
+				opts.UninterpretedOption = remain
+			}
+		}
+	}
+	return nil
+}
+
+func processDefaultOption(res *parseResult, scope string, fld fldDescriptorish, uos []*dpb.UninterpretedOption) (defaultIndex int, err error) {
+	found, err := findOption(res, scope, uos, "default")
+	if err != nil {
+		return -1, err
+	} else if found == -1 {
+		return -1, nil
+	}
+	opt := uos[found]
+	optNode := res.getOptionNode(opt)
+	fdp := fld.AsFieldDescriptorProto()
+	if fdp.GetLabel() == dpb.FieldDescriptorProto_LABEL_REPEATED {
+		return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is repeated", scope)}
+	}
+	if fdp.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP || fdp.GetType() == dpb.FieldDescriptorProto_TYPE_MESSAGE {
+		return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default value cannot be set because field is a message", scope)}
+	}
+	val := optNode.getValue()
+	if _, ok := val.(*aggregateLiteralNode); ok {
+		return -1, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%s: default value cannot be an aggregate", scope)}
+	}
+	mc := &messageContext{
+		res:         res,
+		file:        fld.GetFile(),
+		elementName: fld.GetFullyQualifiedName(),
+		elementType: descriptorType(fld.AsProto()),
+		option:      opt,
+	}
+	v, err := fieldValue(res, mc, fld, val, true)
+	if err != nil {
+		return -1, err
+	}
+	if str, ok := v.(string); ok {
+		fld.AsFieldDescriptorProto().DefaultValue = proto.String(str)
+	} else if b, ok := v.([]byte); ok {
+		fld.AsFieldDescriptorProto().DefaultValue = proto.String(encodeDefaultBytes(b))
+	} else {
+		var flt float64
+		var ok bool
+		if flt, ok = v.(float64); !ok {
+			var flt32 float32
+			if flt32, ok = v.(float32); ok {
+				flt = float64(flt32)
+			}
+		}
+		if ok {
+			if math.IsInf(flt, 1) {
+				fld.AsFieldDescriptorProto().DefaultValue = proto.String("inf")
+			} else if ok && math.IsInf(flt, -1) {
+				fld.AsFieldDescriptorProto().DefaultValue = proto.String("-inf")
+			} else if ok && math.IsNaN(flt) {
+				fld.AsFieldDescriptorProto().DefaultValue = proto.String("nan")
+			} else {
+				fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+			}
+		} else {
+			fld.AsFieldDescriptorProto().DefaultValue = proto.String(fmt.Sprintf("%v", v))
+		}
+	}
+	return found, nil
+}
+
+func encodeDefaultBytes(b []byte) string {
+	var buf bytes.Buffer
+	writeEscapedBytes(&buf, b)
+	return buf.String()
+}
+
+func interpretEnumOptions(r *parseResult, ed enumDescriptorish) error {
+	opts := ed.GetEnumOptions()
+	if opts != nil {
+		if len(opts.UninterpretedOption) > 0 {
+			if remain, err := interpretOptions(r, ed, opts, opts.UninterpretedOption); err != nil {
+				return err
+			} else {
+				opts.UninterpretedOption = remain
+			}
+		}
+	}
+	for _, evd := range ed.GetValues() {
+		opts := evd.GetEnumValueOptions()
+		if opts != nil {
+			if len(opts.UninterpretedOption) > 0 {
+				if remain, err := interpretOptions(r, evd, opts, opts.UninterpretedOption); err != nil {
+					return err
+				} else {
+					opts.UninterpretedOption = remain
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func interpretOptions(res *parseResult, element descriptorish, opts proto.Message, uninterpreted []*dpb.UninterpretedOption) ([]*dpb.UninterpretedOption, error) {
+	optsd, err := desc.LoadMessageDescriptorForMessage(opts)
+	if err != nil {
+		if res.lenient {
+			return uninterpreted, nil
+		}
+		return nil, err
+	}
+	dm := dynamic.NewMessage(optsd)
+	err = dm.ConvertFrom(opts)
+	if err != nil {
+		if res.lenient {
+			return uninterpreted, nil
+		}
+		node := res.nodes[element.AsProto()]
+		return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+	}
+
+	mc := &messageContext{res: res, file: element.GetFile(), elementName: element.GetFullyQualifiedName(), elementType: descriptorType(element.AsProto())}
+	var remain []*dpb.UninterpretedOption
+	for _, uo := range uninterpreted {
+		node := res.getOptionNode(uo)
+		if !uo.Name[0].GetIsExtension() && uo.Name[0].GetNamePart() == "uninterpreted_option" {
+			if res.lenient {
+				remain = append(remain, uo)
+				continue
+			}
+			// uninterpreted_option might be found reflectively, but is not actually valid for use
+			return nil, ErrorWithSourcePos{Pos: node.getName().start(), Underlying: fmt.Errorf("%vinvalid option 'uninterpreted_option'", mc)}
+		}
+		mc.option = uo
+		path, err := interpretField(res, mc, element, dm, uo, 0, nil)
+		if err != nil {
+			if res.lenient {
+				remain = append(remain, uo)
+				continue
+			}
+			return nil, err
+		}
+		if optn, ok := node.(*optionNode); ok {
+			res.interpretedOptions[optn] = path
+		}
+	}
+
+	if err := dm.ValidateRecursive(); err != nil {
+		// if lenient, we'll let this pass, but it means that some required field was not set!
+		// TODO: do this in a more granular way, so we can validate individual fields
+		// and leave them uninterpreted, instead of just having to live with the
+		// thing having invalid data in extensions.
+		if !res.lenient {
+			node := res.nodes[element.AsProto()]
+			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: fmt.Errorf("error in %s options: %v", descriptorType(element.AsProto()), err)}
+		}
+	}
+
+	if res.lenient {
+		// If we're lenient, then we don't want to clobber the passed-in message
+		// and leave it partially populated. So we convert into a copy first.
+		optsClone := proto.Clone(opts)
+		if err := dm.ConvertTo(optsClone); err != nil {
+			// TODO: do this in a more granular way, so we can convert individual
+			// fields and leave bad ones uninterpreted instead of skipping all of
+			// the work we've done so far.
+			return uninterpreted, nil
+		}
+		// conversion from dynamic message above worked, so now
+		// it is safe to overwrite the passed in message
+		opts.Reset()
+		proto.Merge(opts, optsClone)
+
+	} else {
+		// not lenient: try to convert into the passed-in message
+		// and fail if not successful
+		if err := dm.ConvertTo(opts); err != nil {
+			node := res.nodes[element.AsProto()]
+			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+		}
+	}
+
+	return remain, nil
+}
+
+func interpretField(res *parseResult, mc *messageContext, element descriptorish, dm *dynamic.Message, opt *dpb.UninterpretedOption, nameIndex int, pathPrefix []int32) (path []int32, err error) {
+	var fld *desc.FieldDescriptor
+	nm := opt.GetName()[nameIndex]
+	node := res.getOptionNamePartNode(nm)
+	if nm.GetIsExtension() {
+		extName := nm.GetNamePart()
+		if extName[0] == '.' {
+			extName = extName[1:] /* skip leading dot */
+		}
+		fld = findExtension(element.GetFile(), extName, false, map[fileDescriptorish]struct{}{})
+		if fld == nil {
+			return nil, ErrorWithSourcePos{
+				Pos: node.start(),
+				Underlying: fmt.Errorf("%vunrecognized extension %s of %s",
+					mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName()),
+			}
+		}
+		if fld.GetOwner().GetFullyQualifiedName() != dm.GetMessageDescriptor().GetFullyQualifiedName() {
+			return nil, ErrorWithSourcePos{
+				Pos: node.start(),
+				Underlying: fmt.Errorf("%vextension %s should extend %s but instead extends %s",
+					mc, extName, dm.GetMessageDescriptor().GetFullyQualifiedName(), fld.GetOwner().GetFullyQualifiedName()),
+			}
+		}
+	} else {
+		fld = dm.GetMessageDescriptor().FindFieldByName(nm.GetNamePart())
+		if fld == nil {
+			return nil, ErrorWithSourcePos{
+				Pos: node.start(),
+				Underlying: fmt.Errorf("%vfield %s of %s does not exist",
+					mc, nm.GetNamePart(), dm.GetMessageDescriptor().GetFullyQualifiedName()),
+			}
+		}
+	}
+
+	path = append(pathPrefix, fld.GetNumber())
+
+	if len(opt.GetName()) > nameIndex+1 {
+		nextnm := opt.GetName()[nameIndex+1]
+		nextnode := res.getOptionNamePartNode(nextnm)
+		if fld.GetType() != dpb.FieldDescriptorProto_TYPE_MESSAGE {
+			return nil, ErrorWithSourcePos{
+				Pos: nextnode.start(),
+				Underlying: fmt.Errorf("%vcannot set field %s because %s is not a message",
+					mc, nextnm.GetNamePart(), nm.GetNamePart()),
+			}
+		}
+		if fld.IsRepeated() {
+			return nil, ErrorWithSourcePos{
+				Pos: nextnode.start(),
+				Underlying: fmt.Errorf("%vcannot set field %s because %s is repeated (must use an aggregate)",
+					mc, nextnm.GetNamePart(), nm.GetNamePart()),
+			}
+		}
+		var fdm *dynamic.Message
+		var err error
+		if dm.HasField(fld) {
+			var v interface{}
+			v, err = dm.TryGetField(fld)
+			fdm, _ = v.(*dynamic.Message)
+		} else {
+			fdm = dynamic.NewMessage(fld.GetMessageType())
+			err = dm.TrySetField(fld, fdm)
+		}
+		if err != nil {
+			return nil, ErrorWithSourcePos{Pos: node.start(), Underlying: err}
+		}
+		// recurse to set next part of name
+		return interpretField(res, mc, element, fdm, opt, nameIndex+1, path)
+	}
+
+	optNode := res.getOptionNode(opt)
+	if err := setOptionField(res, mc, dm, fld, node, optNode.getValue()); err != nil {
+		return nil, err
+	}
+	if fld.IsRepeated() {
+		path = append(path, int32(dm.FieldLength(fld))-1)
+	}
+	return path, nil
+}
+
+func findExtension(fd fileDescriptorish, name string, public bool, checked map[fileDescriptorish]struct{}) *desc.FieldDescriptor {
+	if _, ok := checked[fd]; ok {
+		return nil
+	}
+	checked[fd] = struct{}{}
+	d := fd.FindSymbol(name)
+	if d != nil {
+		if fld, ok := d.(*desc.FieldDescriptor); ok {
+			return fld
+		}
+		return nil
+	}
+
+	// When public = false, we are searching only directly imported symbols. But we
+	// also need to search transitive public imports due to semantics of public imports.
+	if public {
+		for _, dep := range fd.GetPublicDependencies() {
+			d := findExtension(dep, name, true, checked)
+			if d != nil {
+				return d
+			}
+		}
+	} else {
+		for _, dep := range fd.GetDependencies() {
+			d := findExtension(dep, name, true, checked)
+			if d != nil {
+				return d
+			}
+		}
+	}
+	return nil
+}
+
+func setOptionField(res *parseResult, mc *messageContext, dm *dynamic.Message, fld *desc.FieldDescriptor, name node, val valueNode) error {
+	v := val.value()
+	if sl, ok := v.([]valueNode); ok {
+		// handle slices a little differently than the others
+		if !fld.IsRepeated() {
+			return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue is an array but field is not repeated", mc)}
+		}
+		origPath := mc.optAggPath
+		defer func() {
+			mc.optAggPath = origPath
+		}()
+		for index, item := range sl {
+			mc.optAggPath = fmt.Sprintf("%s[%d]", origPath, index)
+			if v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, item, false); err != nil {
+				return err
+			} else if err = dm.TryAddRepeatedField(fld, v); err != nil {
+				return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
+			}
+		}
+		return nil
+	}
+
+	v, err := fieldValue(res, mc, richFldDescriptorish{FieldDescriptor: fld}, val, false)
+	if err != nil {
+		return err
+	}
+	if fld.IsRepeated() {
+		err = dm.TryAddRepeatedField(fld, v)
+	} else {
+		if dm.HasField(fld) {
+			return ErrorWithSourcePos{Pos: name.start(), Underlying: fmt.Errorf("%vnon-repeated option field %s already set", mc, fieldName(fld))}
+		}
+		err = dm.TrySetField(fld, v)
+	}
+	if err != nil {
+		return ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%verror setting value: %s", mc, err)}
+	}
+
+	return nil
+}
+
+type messageContext struct {
+	res         *parseResult
+	file        fileDescriptorish
+	elementType string
+	elementName string
+	option      *dpb.UninterpretedOption
+	optAggPath  string
+}
+
+func (c *messageContext) String() string {
+	var ctx bytes.Buffer
+	if c.elementType != "file" {
+		fmt.Fprintf(&ctx, "%s %s: ", c.elementType, c.elementName)
+	}
+	if c.option != nil && c.option.Name != nil {
+		ctx.WriteString("option ")
+		writeOptionName(&ctx, c.option.Name)
+		if c.res.nodes == nil {
+			// if we have no source position info, try to provide as much context
+			// as possible (if nodes != nil, we don't need this because any errors
+			// will actually have file and line numbers)
+			if c.optAggPath != "" {
+				fmt.Fprintf(&ctx, " at %s", c.optAggPath)
+			}
+		}
+		ctx.WriteString(": ")
+	}
+	return ctx.String()
+}
+
+func writeOptionName(buf *bytes.Buffer, parts []*dpb.UninterpretedOption_NamePart) {
+	first := true
+	for _, p := range parts {
+		if first {
+			first = false
+		} else {
+			buf.WriteByte('.')
+		}
+		nm := p.GetNamePart()
+		if nm[0] == '.' {
+			// skip leading dot
+			nm = nm[1:]
+		}
+		if p.GetIsExtension() {
+			buf.WriteByte('(')
+			buf.WriteString(nm)
+			buf.WriteByte(')')
+		} else {
+			buf.WriteString(nm)
+		}
+	}
+}
+
+func fieldName(fld *desc.FieldDescriptor) string {
+	if fld.IsExtension() {
+		return fld.GetFullyQualifiedName()
+	} else {
+		return fld.GetName()
+	}
+}
+
+func valueKind(val interface{}) string {
+	switch val := val.(type) {
+	case identifier:
+		return "identifier"
+	case bool:
+		return "bool"
+	case int64:
+		if val < 0 {
+			return "negative integer"
+		}
+		return "integer"
+	case uint64:
+		return "integer"
+	case float64:
+		return "double"
+	case string, []byte:
+		return "string"
+	case []*aggregateEntryNode:
+		return "message"
+	default:
+		return fmt.Sprintf("%T", val)
+	}
+}
+
+func fieldValue(res *parseResult, mc *messageContext, fld fldDescriptorish, val valueNode, enumAsString bool) (interface{}, error) {
+	v := val.value()
+	t := fld.AsFieldDescriptorProto().GetType()
+	switch t {
+	case dpb.FieldDescriptorProto_TYPE_ENUM:
+		if id, ok := v.(identifier); ok {
+			ev := fld.GetEnumType().FindValueByName(string(id))
+			if ev == nil {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%venum %s has no value named %s", mc, fld.GetEnumType().GetFullyQualifiedName(), id)}
+			}
+			if enumAsString {
+				return ev.GetName(), nil
+			} else {
+				return ev.GetNumber(), nil
+			}
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting enum, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_MESSAGE, dpb.FieldDescriptorProto_TYPE_GROUP:
+		if aggs, ok := v.([]*aggregateEntryNode); ok {
+			fmd := fld.GetMessageType()
+			fdm := dynamic.NewMessage(fmd)
+			origPath := mc.optAggPath
+			defer func() {
+				mc.optAggPath = origPath
+			}()
+			for _, a := range aggs {
+				if origPath == "" {
+					mc.optAggPath = a.name.value()
+				} else {
+					mc.optAggPath = origPath + "." + a.name.value()
+				}
+				var ffld *desc.FieldDescriptor
+				if a.name.isExtension {
+					n := a.name.name.val
+					ffld = findExtension(mc.file, n, false, map[fileDescriptorish]struct{}{})
+					if ffld == nil {
+						// may need to qualify with package name
+						pkg := mc.file.GetPackage()
+						if pkg != "" {
+							ffld = findExtension(mc.file, pkg+"."+n, false, map[fileDescriptorish]struct{}{})
+						}
+					}
+				} else {
+					ffld = fmd.FindFieldByName(a.name.value())
+				}
+				if ffld == nil {
+					return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vfield %s not found", mc, a.name.name.val)}
+				}
+				if err := setOptionField(res, mc, fdm, ffld, a.name, a.val); err != nil {
+					return nil, err
+				}
+			}
+			return fdm, nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting message, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_BOOL:
+		if b, ok := v.(bool); ok {
+			return b, nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bool, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_BYTES:
+		if str, ok := v.(string); ok {
+			return []byte(str), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting bytes, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_STRING:
+		if str, ok := v.(string); ok {
+			return str, nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting string, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_INT32, dpb.FieldDescriptorProto_TYPE_SINT32, dpb.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, ok := v.(int64); ok {
+			if i > math.MaxInt32 || i < math.MinInt32 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, i)}
+			}
+			return int32(i), nil
+		}
+		if ui, ok := v.(uint64); ok {
+			if ui > math.MaxInt32 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int32", mc, ui)}
+			}
+			return int32(ui), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int32, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_UINT32, dpb.FieldDescriptorProto_TYPE_FIXED32:
+		if i, ok := v.(int64); ok {
+			if i > math.MaxUint32 || i < 0 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, i)}
+			}
+			return uint32(i), nil
+		}
+		if ui, ok := v.(uint64); ok {
+			if ui > math.MaxUint32 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint32", mc, ui)}
+			}
+			return uint32(ui), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint32, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_INT64, dpb.FieldDescriptorProto_TYPE_SINT64, dpb.FieldDescriptorProto_TYPE_SFIXED64:
+		if i, ok := v.(int64); ok {
+			return i, nil
+		}
+		if ui, ok := v.(uint64); ok {
+			if ui > math.MaxInt64 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for int64", mc, ui)}
+			}
+			return int64(ui), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting int64, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_UINT64, dpb.FieldDescriptorProto_TYPE_FIXED64:
+		if i, ok := v.(int64); ok {
+			if i < 0 {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %d is out of range for uint64", mc, i)}
+			}
+			return uint64(i), nil
+		}
+		if ui, ok := v.(uint64); ok {
+			return ui, nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting uint64, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_DOUBLE:
+		if d, ok := v.(float64); ok {
+			return d, nil
+		}
+		if i, ok := v.(int64); ok {
+			return float64(i), nil
+		}
+		if u, ok := v.(uint64); ok {
+			return float64(u), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting double, got %s", mc, valueKind(v))}
+	case dpb.FieldDescriptorProto_TYPE_FLOAT:
+		if d, ok := v.(float64); ok {
+			if (d > math.MaxFloat32 || d < -math.MaxFloat32) && !math.IsInf(d, 1) && !math.IsInf(d, -1) && !math.IsNaN(d) {
+				return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vvalue %f is out of range for float", mc, d)}
+			}
+			return float32(d), nil
+		}
+		if i, ok := v.(int64); ok {
+			return float32(i), nil
+		}
+		if u, ok := v.(uint64); ok {
+			return float32(u), nil
+		}
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vexpecting float, got %s", mc, valueKind(v))}
+	default:
+		return nil, ErrorWithSourcePos{Pos: val.start(), Underlying: fmt.Errorf("%vunrecognized field type: %s", mc, t)}
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
new file mode 100644
index 0000000..ce9a3e4
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/parser.go
@@ -0,0 +1,1520 @@
+package protoparse
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+//go:generate goyacc -o proto.y.go -p proto proto.y
+
+var errNoImportPathsForAbsoluteFilePath = errors.New("must specify at least one import path if any absolute file paths are given")
+
+func init() {
+	protoErrorVerbose = true
+
+	// fix up the generated "token name" array so that error messages are nicer
+	setTokenName(_STRING_LIT, "string literal")
+	setTokenName(_INT_LIT, "int literal")
+	setTokenName(_FLOAT_LIT, "float literal")
+	setTokenName(_NAME, "identifier")
+	setTokenName(_FQNAME, "fully-qualified name")
+	setTokenName(_TYPENAME, "type name")
+	setTokenName(_ERROR, "error")
+	// for keywords, just show the keyword itself wrapped in quotes
+	for str, i := range keywords {
+		setTokenName(i, fmt.Sprintf(`"%s"`, str))
+	}
+}
+
+func setTokenName(token int, text string) {
+	// NB: this is based on logic in generated parse code that translates the
+	// int returned from the lexer into an internal token number.
+	var intern int
+	if token < len(protoTok1) {
+		intern = protoTok1[token]
+	} else {
+		if token >= protoPrivate {
+			if token < protoPrivate+len(protoTok2) {
+				intern = protoTok2[token-protoPrivate]
+			}
+		}
+		if intern == 0 {
+			for i := 0; i+1 < len(protoTok3); i += 2 {
+				if protoTok3[i] == token {
+					intern = protoTok3[i+1]
+					break
+				}
+			}
+		}
+	}
+
+	if intern >= 1 && intern-1 < len(protoToknames) {
+		protoToknames[intern-1] = text
+		return
+	}
+
+	panic(fmt.Sprintf("Unknown token value: %d", token))
+}
+
+// FileAccessor is an abstraction for opening proto source files. It takes the
+// name of the file to open and returns either the input reader or an error.
+type FileAccessor func(filename string) (io.ReadCloser, error)
+
+// FileContentsFromMap returns a FileAccessor that uses the given map of file
+// contents. This allows proto source files to be constructed in memory and
+// easily supplied to a parser. The map keys are the paths to the proto source
+// files, and the values are the actual proto source contents.
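+//
+// A minimal usage sketch (the file name and contents below are hypothetical,
+// not part of this package):
+//
+//	files := map[string]string{
+//		"test.proto": `syntax = "proto3"; message Foo { string name = 1; }`,
+//	}
+//	p := Parser{Accessor: FileContentsFromMap(files)}
+//	fds, err := p.ParseFiles("test.proto")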
+func FileContentsFromMap(files map[string]string) FileAccessor {
+	return func(filename string) (io.ReadCloser, error) {
+		contents, ok := files[filename]
+		if !ok {
+			return nil, os.ErrNotExist
+		}
+		return ioutil.NopCloser(strings.NewReader(contents)), nil
+	}
+}
+
+// ResolveFilenames tries to resolve fileNames into paths that are relative to
+// directories in the given importPaths. The returned slice has the results in
+// the same order as they are supplied in fileNames.
+//
+// The resulting names should be suitable for passing to Parser.ParseFiles.
+//
+// If importPaths is empty and any path is absolute, this returns an error.
+// If importPaths is empty and all paths are relative, this returns the original fileNames.
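+//
+// A hedged example (paths shown are illustrative only):
+//
+//	names, err := ResolveFilenames(
+//		[]string{"/home/alice/dev"},
+//		"/home/alice/dev/foo/bar/bar.proto",
+//	)
+//	// on success, names would be []string{"foo/bar/bar.proto"}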
+func ResolveFilenames(importPaths []string, fileNames ...string) ([]string, error) {
+	if len(importPaths) == 0 {
+		if containsAbsFilePath(fileNames) {
+			// We have to do this as otherwise parseProtoFiles can result in duplicate symbols.
+			// For example, assume we import "foo/bar/bar.proto" in a file "/home/alice/dev/foo/bar/baz.proto"
+			// as we call ParseFiles("/home/alice/dev/foo/bar/bar.proto","/home/alice/dev/foo/bar/baz.proto")
+			// with "/home/alice/dev" as our current directory. Due to the recursive nature of parseProtoFiles,
+			// it will discover the import "foo/bar/bar.proto" in the input file, and call parse on this,
+			// adding "foo/bar/bar.proto" to the parsed results, as well as "/home/alice/dev/foo/bar/bar.proto"
+			// from the input file list. This will result in a
+			// 'duplicate symbol SYMBOL: already defined as field in "/home/alice/dev/foo/bar/bar.proto"'
+			// error being returned from ParseFiles.
+			return nil, errNoImportPathsForAbsoluteFilePath
+		}
+		return fileNames, nil
+	}
+	absImportPaths, err := absoluteFilePaths(importPaths)
+	if err != nil {
+		return nil, err
+	}
+	absFileNames, err := absoluteFilePaths(fileNames)
+	if err != nil {
+		return nil, err
+	}
+	resolvedFileNames := make([]string, 0, len(fileNames))
+	for _, absFileName := range absFileNames {
+		resolvedFileName, err := resolveAbsFilename(absImportPaths, absFileName)
+		if err != nil {
+			return nil, err
+		}
+		resolvedFileNames = append(resolvedFileNames, resolvedFileName)
+	}
+	return resolvedFileNames, nil
+}
+
+// Parser parses proto source into descriptors.
+type Parser struct {
+	// The paths used to search for dependencies that are referenced in import
+	// statements in proto source files. If no import paths are provided then
+	// "." (current directory) is assumed to be the only import path.
+	//
+	// This setting is only used during ParseFiles operations. Since calls to
+	// ParseFilesButDoNotLink do not link, there is no need to load and parse
+	// dependencies.
+	ImportPaths []string
+
+	// If true, the supplied file names/paths need not necessarily match how the
+	// files are referenced in import statements. The parser will attempt to
+	// match import statements to supplied paths, "guessing" the import paths
+	// for the files. Note that this inference is not perfect and link errors
+	// could result. It works best when all proto files are organized such that
+	// a single import path can be inferred (e.g. all files under a single tree
+	// with import statements all being relative to the root of this tree).
+	InferImportPaths bool
+
+	// Used to create a reader for a given filename, when loading proto source
+	// file contents. If unset, os.Open is used. If ImportPaths is also empty
+	// then relative paths will be resolved relative to the process's current working
+	// directory.
+	Accessor FileAccessor
+
+	// If true, the resulting file descriptors will retain source code info,
+	// that maps elements to their location in the source files as well as
+	// includes comments found during parsing (and attributed to elements of
+	// the source file).
+	IncludeSourceCodeInfo bool
+
+	// If true, the results from ParseFilesButDoNotLink will be passed through
+	// some additional validations. But only constraints that do not require
+	// linking can be checked. These include proto2 vs. proto3 language features,
+	// looking for incorrect usage of reserved names or tags, and ensuring that
+	// fields have unique tags and that enum values have unique numbers (unless
+	// the enum allows aliases).
+	ValidateUnlinkedFiles bool
+
+	// If true, the results from ParseFilesButDoNotLink will have options
+	// interpreted. Any uninterpretable options (including any custom options or
+	// options that refer to message and enum types, which can only be
+	// interpreted after linking) will be left in uninterpreted_options. Also,
+	// the "default" pseudo-option for fields can only be interpreted for scalar
+	// fields, excluding enums. (Interpreting default values for enum fields
+	// requires resolving enum names, which requires linking.)
+	InterpretOptionsInUnlinkedFiles bool
+}
+
+// ParseFiles parses the named files into descriptors. The returned slice has
+// the same number of entries as the given filenames, in the same order. So the
+// first returned descriptor corresponds to the first given name, and so on.
+//
+// All dependencies for all specified files (including transitive dependencies)
+// must be accessible via the parser's Accessor or a link error will occur. The
+// exception to this rule is that files can import standard Google-provided
+// files -- e.g. google/protobuf/*.proto -- without needing to supply sources
+// for these files. Like protoc, this parser has a built-in version of these
+// files it can use if they aren't explicitly supplied.
+func (p Parser) ParseFiles(filenames ...string) ([]*desc.FileDescriptor, error) {
+	accessor := p.Accessor
+	if accessor == nil {
+		accessor = func(name string) (io.ReadCloser, error) {
+			return os.Open(name)
+		}
+	}
+	paths := p.ImportPaths
+	if len(paths) > 0 {
+		acc := accessor
+		accessor = func(name string) (io.ReadCloser, error) {
+			var ret error
+			for _, path := range paths {
+				f, err := acc(filepath.Join(path, name))
+				if err != nil {
+					if ret == nil {
+						ret = err
+					}
+					continue
+				}
+				return f, nil
+			}
+			return nil, ret
+		}
+	}
+
+	protos := map[string]*parseResult{}
+	err := parseProtoFiles(accessor, filenames, true, true, protos)
+	if err != nil {
+		return nil, err
+	}
+	if p.InferImportPaths {
+		protos = fixupFilenames(protos)
+	}
+	linkedProtos, err := newLinker(protos).linkFiles()
+	if err != nil {
+		return nil, err
+	}
+	if p.IncludeSourceCodeInfo {
+		for name, fd := range linkedProtos {
+			pr := protos[name]
+			fd.AsFileDescriptorProto().SourceCodeInfo = pr.generateSourceCodeInfo()
+			internal.RecomputeSourceInfo(fd)
+		}
+	}
+	fds := make([]*desc.FileDescriptor, len(filenames))
+	for i, name := range filenames {
+		fd := linkedProtos[name]
+		fds[i] = fd
+	}
+	return fds, nil
+}
+
+// ParseFilesButDoNotLink parses the named files into descriptor protos. The
+// results are just protos, not fully-linked descriptors. Because the linking
+// step is skipped (and thus many validation steps are omitted), it is possible
+// for invalid descriptors to be returned in parsed form without error.
+//
+// There are a few side effects to not linking the descriptors:
+//   1. No options will be interpreted. Options can refer to extensions or have
+//      message and enum types. Without linking, these extension and type
+//      references are not resolved, so the options may not be interpretable.
+//      So all options will appear in UninterpretedOption fields of the various
+//      descriptor options messages.
+//   2. Type references will not be resolved. This means that the actual type
+//      names in the descriptors may be unqualified and even relative to the
+//      scope in which the type reference appears. This goes for fields that
+//      have message and enum types. It also applies to methods and their
+//      references to request and response message types.
+//   3. Enum fields are not known. Until a field's type reference is resolved
+//      (during linking), it is not known whether the type refers to a message
+//      or an enum. So all fields with such type references have their Type set
+//      to TYPE_MESSAGE.
+//
+// This method will still validate the syntax of parsed files. If the parser's
+// ValidateUnlinkedFiles field is true, additional checks beyond syntax will
+// also be performed.
+func (p Parser) ParseFilesButDoNotLink(filenames ...string) ([]*dpb.FileDescriptorProto, error) {
+	accessor := p.Accessor
+	if accessor == nil {
+		accessor = func(name string) (io.ReadCloser, error) {
+			return os.Open(name)
+		}
+	}
+
+	protos := map[string]*parseResult{}
+	err := parseProtoFiles(accessor, filenames, false, p.ValidateUnlinkedFiles, protos)
+	if err != nil {
+		return nil, err
+	}
+	if p.InferImportPaths {
+		protos = fixupFilenames(protos)
+	}
+	fds := make([]*dpb.FileDescriptorProto, len(filenames))
+	for i, name := range filenames {
+		pr := protos[name]
+		fd := pr.fd
+		if p.InterpretOptionsInUnlinkedFiles {
+			pr.lenient = true
+			interpretFileOptions(pr, poorFileDescriptorish{FileDescriptorProto: fd})
+		}
+		if p.IncludeSourceCodeInfo {
+			fd.SourceCodeInfo = pr.generateSourceCodeInfo()
+		}
+		fds[i] = fd
+	}
+	return fds, nil
+}
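+
+// A minimal sketch (illustrative only; "example.proto" is hypothetical): the
+// returned protos can be inspected without linking, but type names may be
+// unresolved and options remain uninterpreted, as described above.
+//
+//	fdps, err := protoparse.Parser{}.ParseFilesButDoNotLink("example.proto")
+//	if err == nil {
+//		opts := fdps[0].GetOptions().GetUninterpretedOption()
+//		_ = opts // file options are left uninterpreted without linking
+//	}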
+
+func containsAbsFilePath(filePaths []string) bool {
+	for _, filePath := range filePaths {
+		if filepath.IsAbs(filePath) {
+			return true
+		}
+	}
+	return false
+}
+
+func absoluteFilePaths(filePaths []string) ([]string, error) {
+	absFilePaths := make([]string, 0, len(filePaths))
+	for _, filePath := range filePaths {
+		absFilePath, err := filepath.Abs(filePath)
+		if err != nil {
+			return nil, err
+		}
+		absFilePaths = append(absFilePaths, absFilePath)
+	}
+	return absFilePaths, nil
+}
+
+func resolveAbsFilename(absImportPaths []string, absFileName string) (string, error) {
+	for _, absImportPath := range absImportPaths {
+		if isDescendant(absImportPath, absFileName) {
+			resolvedPath, err := filepath.Rel(absImportPath, absFileName)
+			if err != nil {
+				return "", err
+			}
+			return resolvedPath, nil
+		}
+	}
+	return "", fmt.Errorf("%s does not reside in any import path", absFileName)
+}
+
+// isDescendant returns true if file is a descendant of dir.
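+// For example (illustrative): isDescendant("a/b", "a/b/c/d.proto") is true,
+// while isDescendant("a/b", "a/bc/d.proto") is false.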
+func isDescendant(dir, file string) bool {
+	dir = filepath.Clean(dir)
+	cur := file
+	for {
+		d := filepath.Dir(cur)
+		if d == dir {
+			return true
+		}
+		if d == "." || d == cur {
+			// we've run out of path elements
+			return false
+		}
+		cur = d
+	}
+}
+
+func fixupFilenames(protos map[string]*parseResult) map[string]*parseResult {
+	// In the event that the given filenames (keys in the supplied map) do not
+	// match the actual paths used in 'import' statements in the files, we try
+	// to revise names in the protos so that they will match and be linkable.
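+	// For example (hypothetical paths): if the caller supplied
+	// "protos/foo/bar.proto" but another file imports it as "foo/bar.proto",
+	// the entry is re-keyed to "foo/bar.proto" and the stripped prefix
+	// "protos/" is remembered as a likely proto path for the remaining
+	// entry-point files handled below.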
+	revisedProtos := map[string]*parseResult{}
+
+	protoPaths := map[string]struct{}{}
+	// TODO: this is O(n^2) but could likely be O(n) with a clever data structure (prefix tree that is indexed backwards?)
+	importCandidates := map[string]map[string]struct{}{}
+	candidatesAvailable := map[string]struct{}{}
+	for name := range protos {
+		candidatesAvailable[name] = struct{}{}
+		for _, f := range protos {
+			for _, imp := range f.fd.Dependency {
+				if strings.HasSuffix(name, imp) {
+					candidates := importCandidates[imp]
+					if candidates == nil {
+						candidates = map[string]struct{}{}
+						importCandidates[imp] = candidates
+					}
+					candidates[name] = struct{}{}
+				}
+			}
+		}
+	}
+	for imp, candidates := range importCandidates {
+		// if we found multiple possible candidates, use the one that is an exact match
+		// if it exists, and otherwise, guess that it's the shortest path (fewest elements)
+		var best string
+		for c := range candidates {
+			if _, ok := candidatesAvailable[c]; !ok {
+				// this candidate was already used and its filename rewritten accordingly
+				continue
+			}
+			if c == imp {
+				// exact match!
+				best = c
+				break
+			}
+			if best == "" {
+				best = c
+			} else {
+				// HACK: we can't actually tell which file is supposed to match
+				// this import, so arbitrarily pick the "shorter" one (fewest
+				// path elements) or, on a tie, the lexically earlier one
+				minLen := strings.Count(best, string(filepath.Separator))
+				cLen := strings.Count(c, string(filepath.Separator))
+				if cLen < minLen || (cLen == minLen && c < best) {
+					best = c
+				}
+			}
+		}
+		if best != "" {
+			prefix := best[:len(best)-len(imp)]
+			if len(prefix) > 0 {
+				protoPaths[prefix] = struct{}{}
+			}
+			f := protos[best]
+			f.fd.Name = proto.String(imp)
+			revisedProtos[imp] = f
+			delete(candidatesAvailable, best)
+		}
+	}
+
+	if len(candidatesAvailable) == 0 {
+		return revisedProtos
+	}
+
+	if len(protoPaths) == 0 {
+		for c := range candidatesAvailable {
+			revisedProtos[c] = protos[c]
+		}
+		return revisedProtos
+	}
+
+	// Any remaining candidates are entry-points (not imported by others), so
+	// the best bet for "fixing" their file name is to see if they're in one of
+	// the proto paths we found, and if so strip that prefix.
+	protoPathStrs := make([]string, len(protoPaths))
+	i := 0
+	for p := range protoPaths {
+		protoPathStrs[i] = p
+		i++
+	}
+	sort.Strings(protoPathStrs)
+	// we look at paths in reverse order, so we'll use a longer proto path if
+	// there is more than one match
+	for c := range candidatesAvailable {
+		var imp string
+		for i := len(protoPathStrs) - 1; i >= 0; i-- {
+			p := protoPathStrs[i]
+			if strings.HasPrefix(c, p) {
+				imp = c[len(p):]
+				break
+			}
+		}
+		if imp != "" {
+			f := protos[c]
+			f.fd.Name = proto.String(imp)
+			revisedProtos[imp] = f
+		} else {
+			revisedProtos[c] = protos[c]
+		}
+	}
+
+	return revisedProtos
+}
+
+func parseProtoFiles(acc FileAccessor, filenames []string, recursive, validate bool, parsed map[string]*parseResult) error {
+	for _, name := range filenames {
+		if _, ok := parsed[name]; ok {
+			continue
+		}
+		in, err := acc(name)
+		if err != nil {
+			if d, ok := standardImports[name]; ok {
+				parsed[name] = &parseResult{fd: d}
+				continue
+			}
+			return err
+		}
+		func() {
+			defer in.Close()
+			parsed[name], err = parseProto(name, in, validate)
+		}()
+		if err != nil {
+			return err
+		}
+		if recursive {
+			err = parseProtoFiles(acc, parsed[name].fd.Dependency, true, validate, parsed)
+			if err != nil {
+				return fmt.Errorf("failed to load imports for %q: %s", name, err)
+			}
+		}
+	}
+	return nil
+}
+
+type parseResult struct {
+	// the parsed file descriptor
+	fd *dpb.FileDescriptorProto
+
+	// if set to true, enables lenient interpretation of options, where
+	// unrecognized options will be left uninterpreted instead of resulting in a
+	// link error
+	lenient bool
+
+	// a map of elements in the descriptor to nodes in the AST
+	// (for extracting position information when validating the descriptor)
+	nodes map[proto.Message]node
+
+	// a map of uninterpreted option AST nodes to their relative path
+	// in the resulting options message
+	interpretedOptions map[*optionNode][]int32
+}
+
+func (r *parseResult) getFileNode(f *dpb.FileDescriptorProto) fileDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(f.GetName())}
+	}
+	return r.nodes[f].(fileDecl)
+}
+
+func (r *parseResult) getOptionNode(o *dpb.UninterpretedOption) optionDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[o].(optionDecl)
+}
+
+func (r *parseResult) getOptionNamePartNode(o *dpb.UninterpretedOption_NamePart) node {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[o]
+}
+
+func (r *parseResult) getMessageNode(m *dpb.DescriptorProto) msgDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[m].(msgDecl)
+}
+
+func (r *parseResult) getFieldNode(f *dpb.FieldDescriptorProto) fieldDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[f].(fieldDecl)
+}
+
+func (r *parseResult) getOneOfNode(o *dpb.OneofDescriptorProto) node {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[o]
+}
+
+func (r *parseResult) getExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange) rangeDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[e].(rangeDecl)
+}
+
+func (r *parseResult) getMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange) rangeDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[rr].(rangeDecl)
+}
+
+func (r *parseResult) getEnumNode(e *dpb.EnumDescriptorProto) node {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[e]
+}
+
+func (r *parseResult) getEnumValueNode(e *dpb.EnumValueDescriptorProto) enumValueDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[e].(enumValueDecl)
+}
+
+func (r *parseResult) getEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange) rangeDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[rr].(rangeDecl)
+}
+
+func (r *parseResult) getServiceNode(s *dpb.ServiceDescriptorProto) node {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[s]
+}
+
+func (r *parseResult) getMethodNode(m *dpb.MethodDescriptorProto) methodDecl {
+	if r.nodes == nil {
+		return noSourceNode{pos: unknownPos(r.fd.GetName())}
+	}
+	return r.nodes[m].(methodDecl)
+}
+
+func (r *parseResult) putFileNode(f *dpb.FileDescriptorProto, n *fileNode) {
+	r.nodes[f] = n
+}
+
+func (r *parseResult) putOptionNode(o *dpb.UninterpretedOption, n *optionNode) {
+	r.nodes[o] = n
+}
+
+func (r *parseResult) putOptionNamePartNode(o *dpb.UninterpretedOption_NamePart, n *optionNamePartNode) {
+	r.nodes[o] = n
+}
+
+func (r *parseResult) putMessageNode(m *dpb.DescriptorProto, n msgDecl) {
+	r.nodes[m] = n
+}
+
+func (r *parseResult) putFieldNode(f *dpb.FieldDescriptorProto, n fieldDecl) {
+	r.nodes[f] = n
+}
+
+func (r *parseResult) putOneOfNode(o *dpb.OneofDescriptorProto, n *oneOfNode) {
+	r.nodes[o] = n
+}
+
+func (r *parseResult) putExtensionRangeNode(e *dpb.DescriptorProto_ExtensionRange, n *rangeNode) {
+	r.nodes[e] = n
+}
+
+func (r *parseResult) putMessageReservedRangeNode(rr *dpb.DescriptorProto_ReservedRange, n *rangeNode) {
+	r.nodes[rr] = n
+}
+
+func (r *parseResult) putEnumNode(e *dpb.EnumDescriptorProto, n *enumNode) {
+	r.nodes[e] = n
+}
+
+func (r *parseResult) putEnumValueNode(e *dpb.EnumValueDescriptorProto, n *enumValueNode) {
+	r.nodes[e] = n
+}
+
+func (r *parseResult) putEnumReservedRangeNode(rr *dpb.EnumDescriptorProto_EnumReservedRange, n *rangeNode) {
+	r.nodes[rr] = n
+}
+
+func (r *parseResult) putServiceNode(s *dpb.ServiceDescriptorProto, n *serviceNode) {
+	r.nodes[s] = n
+}
+
+func (r *parseResult) putMethodNode(m *dpb.MethodDescriptorProto, n *methodNode) {
+	r.nodes[m] = n
+}
+
+func parseProto(filename string, r io.Reader, validate bool) (*parseResult, error) {
+	lx := newLexer(r)
+	lx.filename = filename
+	protoParse(lx)
+	if lx.err != nil {
+		if _, ok := lx.err.(ErrorWithSourcePos); ok {
+			return nil, lx.err
+		} else {
+			return nil, ErrorWithSourcePos{Pos: lx.prev(), Underlying: lx.err}
+		}
+	}
+	// parser will not return an error if input is empty, so we
+	// need to also check if the result is non-nil
+	if lx.res == nil {
+		return nil, ErrorWithSourcePos{Pos: lx.prev(), Underlying: errors.New("input is empty")}
+	}
+
+	res, err := createParseResult(filename, lx.res)
+	if err != nil {
+		return nil, err
+	}
+	if validate {
+		if err := basicValidate(res); err != nil {
+			return nil, err
+		}
+	}
+	return res, nil
+}
+
+func createParseResult(filename string, file *fileNode) (*parseResult, error) {
+	res := &parseResult{
+		nodes:              map[proto.Message]node{},
+		interpretedOptions: map[*optionNode][]int32{},
+	}
+	err := res.createFileDescriptor(filename, file)
+	return res, err
+}
+
+func (r *parseResult) createFileDescriptor(filename string, file *fileNode) error {
+	fd := &dpb.FileDescriptorProto{Name: proto.String(filename)}
+	r.putFileNode(fd, file)
+
+	isProto3 := false
+	if file.syntax != nil {
+		isProto3 = file.syntax.syntax.val == "proto3"
+		// proto2 is the default, so no need to set unless proto3
+		if isProto3 {
+			fd.Syntax = proto.String(file.syntax.syntax.val)
+		}
+	}
+
+	for _, decl := range file.decls {
+		if decl.enum != nil {
+			fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl.enum))
+		} else if decl.extend != nil {
+			r.addExtensions(decl.extend, &fd.Extension, &fd.MessageType, isProto3)
+		} else if decl.imp != nil {
+			file.imports = append(file.imports, decl.imp)
+			index := len(fd.Dependency)
+			fd.Dependency = append(fd.Dependency, decl.imp.name.val)
+			if decl.imp.public {
+				fd.PublicDependency = append(fd.PublicDependency, int32(index))
+			} else if decl.imp.weak {
+				fd.WeakDependency = append(fd.WeakDependency, int32(index))
+			}
+		} else if decl.message != nil {
+			fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl.message, isProto3))
+		} else if decl.option != nil {
+			if fd.Options == nil {
+				fd.Options = &dpb.FileOptions{}
+			}
+			fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+		} else if decl.service != nil {
+			fd.Service = append(fd.Service, r.asServiceDescriptor(decl.service))
+		} else if decl.pkg != nil {
+			if fd.Package != nil {
+				return ErrorWithSourcePos{Pos: decl.pkg.start(), Underlying: errors.New("files should have only one package declaration")}
+			}
+			file.pkg = decl.pkg
+			fd.Package = proto.String(decl.pkg.name.val)
+		}
+	}
+	r.fd = fd
+	return nil
+}
+
+func (r *parseResult) asUninterpretedOptions(nodes []*optionNode) []*dpb.UninterpretedOption {
+	opts := make([]*dpb.UninterpretedOption, len(nodes))
+	for i, n := range nodes {
+		opts[i] = r.asUninterpretedOption(n)
+	}
+	return opts
+}
+
+func (r *parseResult) asUninterpretedOption(node *optionNode) *dpb.UninterpretedOption {
+	opt := &dpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.name.parts)}
+	r.putOptionNode(opt, node)
+
+	switch val := node.val.value().(type) {
+	case bool:
+		if val {
+			opt.IdentifierValue = proto.String("true")
+		} else {
+			opt.IdentifierValue = proto.String("false")
+		}
+	case int64:
+		opt.NegativeIntValue = proto.Int64(val)
+	case uint64:
+		opt.PositiveIntValue = proto.Uint64(val)
+	case float64:
+		opt.DoubleValue = proto.Float64(val)
+	case string:
+		opt.StringValue = []byte(val)
+	case identifier:
+		opt.IdentifierValue = proto.String(string(val))
+	case []*aggregateEntryNode:
+		var buf bytes.Buffer
+		aggToString(val, &buf)
+		aggStr := buf.String()
+		opt.AggregateValue = proto.String(aggStr)
+	}
+	return opt
+}
+
+func (r *parseResult) asUninterpretedOptionName(parts []*optionNamePartNode) []*dpb.UninterpretedOption_NamePart {
+	ret := make([]*dpb.UninterpretedOption_NamePart, len(parts))
+	for i, part := range parts {
+		txt := part.text.val
+		if !part.isExtension {
+			txt = part.text.val[part.offset : part.offset+part.length]
+		}
+		np := &dpb.UninterpretedOption_NamePart{
+			NamePart:    proto.String(txt),
+			IsExtension: proto.Bool(part.isExtension),
+		}
+		r.putOptionNamePartNode(np, part)
+		ret[i] = np
+	}
+	return ret
+}
+
+func (r *parseResult) addExtensions(ext *extendNode, flds *[]*dpb.FieldDescriptorProto, msgs *[]*dpb.DescriptorProto, isProto3 bool) {
+	extendee := ext.extendee.val
+	for _, decl := range ext.decls {
+		if decl.field != nil {
+			decl.field.extendee = ext
+			fd := r.asFieldDescriptor(decl.field)
+			fd.Extendee = proto.String(extendee)
+			*flds = append(*flds, fd)
+		} else if decl.group != nil {
+			decl.group.extendee = ext
+			fd, md := r.asGroupDescriptors(decl.group, isProto3)
+			fd.Extendee = proto.String(extendee)
+			*flds = append(*flds, fd)
+			*msgs = append(*msgs, md)
+		}
+	}
+}
+
+func asLabel(lbl *labelNode) *dpb.FieldDescriptorProto_Label {
+	if lbl == nil {
+		return nil
+	}
+	switch {
+	case lbl.repeated:
+		return dpb.FieldDescriptorProto_LABEL_REPEATED.Enum()
+	case lbl.required:
+		return dpb.FieldDescriptorProto_LABEL_REQUIRED.Enum()
+	default:
+		return dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+	}
+}
+
+func (r *parseResult) asFieldDescriptor(node *fieldNode) *dpb.FieldDescriptorProto {
+	fd := newFieldDescriptor(node.name.val, node.fldType.val, int32(node.tag.val), asLabel(node.label))
+	r.putFieldNode(fd, node)
+	if len(node.options) > 0 {
+		fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(node.options)}
+	}
+	return fd
+}
+
+func newFieldDescriptor(name string, fieldType string, tag int32, lbl *dpb.FieldDescriptorProto_Label) *dpb.FieldDescriptorProto {
+	fd := &dpb.FieldDescriptorProto{
+		Name:     proto.String(name),
+		JsonName: proto.String(internal.JsonName(name)),
+		Number:   proto.Int32(tag),
+		Label:    lbl,
+	}
+	switch fieldType {
+	case "double":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_DOUBLE.Enum()
+	case "float":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_FLOAT.Enum()
+	case "int32":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_INT32.Enum()
+	case "int64":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_INT64.Enum()
+	case "uint32":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_UINT32.Enum()
+	case "uint64":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_UINT64.Enum()
+	case "sint32":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_SINT32.Enum()
+	case "sint64":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_SINT64.Enum()
+	case "fixed32":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_FIXED32.Enum()
+	case "fixed64":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_FIXED64.Enum()
+	case "sfixed32":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_SFIXED32.Enum()
+	case "sfixed64":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_SFIXED64.Enum()
+	case "bool":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_BOOL.Enum()
+	case "string":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_STRING.Enum()
+	case "bytes":
+		fd.Type = dpb.FieldDescriptorProto_TYPE_BYTES.Enum()
+	default:
+		// NB: we don't have enough info to determine whether this is an enum or a message type,
+		// so we'll change it to enum later once we can ascertain if it's an enum reference
+		fd.Type = dpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
+		fd.TypeName = proto.String(fieldType)
+	}
+	return fd
+}
+
+func (r *parseResult) asGroupDescriptors(group *groupNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
+	fieldName := strings.ToLower(group.name.val)
+	fd := &dpb.FieldDescriptorProto{
+		Name:     proto.String(fieldName),
+		JsonName: proto.String(internal.JsonName(fieldName)),
+		Number:   proto.Int32(int32(group.tag.val)),
+		Label:    asLabel(group.label),
+		Type:     dpb.FieldDescriptorProto_TYPE_GROUP.Enum(),
+		TypeName: proto.String(group.name.val),
+	}
+	r.putFieldNode(fd, group)
+	md := &dpb.DescriptorProto{Name: proto.String(group.name.val)}
+	r.putMessageNode(md, group)
+	r.addMessageDecls(md, &group.reserved, group.decls, isProto3)
+	return fd, md
+}
+
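+// asMapDescriptors synthesizes the implicit map-entry message that the
+// descriptor format uses to represent map fields: a (hypothetical) declaration
+// such as `map<string, int32> labels = 5;` becomes a repeated field "labels"
+// whose type is a nested "LabelsEntry" message (with the MapEntry option set)
+// containing "key" (tag 1) and "value" (tag 2) fields.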
+func (r *parseResult) asMapDescriptors(mapField *mapFieldNode, isProto3 bool) (*dpb.FieldDescriptorProto, *dpb.DescriptorProto) {
+	var lbl *dpb.FieldDescriptorProto_Label
+	if !isProto3 {
+		lbl = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+	}
+	keyFd := newFieldDescriptor("key", mapField.keyType.val, 1, lbl)
+	r.putFieldNode(keyFd, mapField.keyField())
+	valFd := newFieldDescriptor("value", mapField.valueType.val, 2, lbl)
+	r.putFieldNode(valFd, mapField.valueField())
+	entryName := internal.InitCap(internal.JsonName(mapField.name.val)) + "Entry"
+	fd := newFieldDescriptor(mapField.name.val, entryName, int32(mapField.tag.val), dpb.FieldDescriptorProto_LABEL_REPEATED.Enum())
+	if len(mapField.options) > 0 {
+		fd.Options = &dpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(mapField.options)}
+	}
+	r.putFieldNode(fd, mapField)
+	md := &dpb.DescriptorProto{
+		Name:    proto.String(entryName),
+		Options: &dpb.MessageOptions{MapEntry: proto.Bool(true)},
+		Field:   []*dpb.FieldDescriptorProto{keyFd, valFd},
+	}
+	r.putMessageNode(md, mapField)
+	return fd, md
+}
+
+func (r *parseResult) asExtensionRanges(node *extensionRangeNode) []*dpb.DescriptorProto_ExtensionRange {
+	opts := r.asUninterpretedOptions(node.options)
+	ers := make([]*dpb.DescriptorProto_ExtensionRange, len(node.ranges))
+	for i, rng := range node.ranges {
+		er := &dpb.DescriptorProto_ExtensionRange{
+			Start: proto.Int32(rng.st),
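+			// the descriptor's End is exclusive, so 1 is added to the
+			// inclusive end taken from the source range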
+			End:   proto.Int32(rng.en + 1),
+		}
+		if len(opts) > 0 {
+			er.Options = &dpb.ExtensionRangeOptions{UninterpretedOption: opts}
+		}
+		r.putExtensionRangeNode(er, rng)
+		ers[i] = er
+	}
+	return ers
+}
+
+func (r *parseResult) asEnumValue(ev *enumValueNode) *dpb.EnumValueDescriptorProto {
+	var num int32
+	if ev.numberP != nil {
+		num = int32(ev.numberP.val)
+	} else {
+		num = int32(ev.numberN.val)
+	}
+	evd := &dpb.EnumValueDescriptorProto{Name: proto.String(ev.name.val), Number: proto.Int32(num)}
+	r.putEnumValueNode(evd, ev)
+	if len(ev.options) > 0 {
+		evd.Options = &dpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(ev.options)}
+	}
+	return evd
+}
+
+func (r *parseResult) asMethodDescriptor(node *methodNode) *dpb.MethodDescriptorProto {
+	md := &dpb.MethodDescriptorProto{
+		Name:       proto.String(node.name.val),
+		InputType:  proto.String(node.input.msgType.val),
+		OutputType: proto.String(node.output.msgType.val),
+	}
+	r.putMethodNode(md, node)
+	if node.input.streamKeyword != nil {
+		md.ClientStreaming = proto.Bool(true)
+	}
+	if node.output.streamKeyword != nil {
+		md.ServerStreaming = proto.Bool(true)
+	}
+	// protoc always adds a MethodOptions if there are brackets.
+	// node.options is non-nil if there are brackets, so we do the
+	// same to match protoc as closely as possible.
+	// https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152
+	if node.options != nil {
+		md.Options = &dpb.MethodOptions{UninterpretedOption: r.asUninterpretedOptions(node.options)}
+	}
+	return md
+}
+
+func (r *parseResult) asEnumDescriptor(en *enumNode) *dpb.EnumDescriptorProto {
+	ed := &dpb.EnumDescriptorProto{Name: proto.String(en.name.val)}
+	r.putEnumNode(ed, en)
+	for _, decl := range en.decls {
+		if decl.option != nil {
+			if ed.Options == nil {
+				ed.Options = &dpb.EnumOptions{}
+			}
+			ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+		} else if decl.value != nil {
+			ed.Value = append(ed.Value, r.asEnumValue(decl.value))
+		} else if decl.reserved != nil {
+			for _, n := range decl.reserved.names {
+				en.reserved = append(en.reserved, n)
+				ed.ReservedName = append(ed.ReservedName, n.val)
+			}
+			for _, rng := range decl.reserved.ranges {
+				ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng))
+			}
+		}
+	}
+	return ed
+}
+
+func (r *parseResult) asEnumReservedRange(rng *rangeNode) *dpb.EnumDescriptorProto_EnumReservedRange {
+	rr := &dpb.EnumDescriptorProto_EnumReservedRange{
+		Start: proto.Int32(rng.st),
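+		// unlike message reserved and extension ranges, enum reserved ranges
+		// use an inclusive End, so no +1 adjustment is made here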
+		End:   proto.Int32(rng.en),
+	}
+	r.putEnumReservedRangeNode(rr, rng)
+	return rr
+}
+
+func (r *parseResult) asMessageDescriptor(node *messageNode, isProto3 bool) *dpb.DescriptorProto {
+	msgd := &dpb.DescriptorProto{Name: proto.String(node.name.val)}
+	r.putMessageNode(msgd, node)
+	r.addMessageDecls(msgd, &node.reserved, node.decls, isProto3)
+	return msgd
+}
+
+func (r *parseResult) addMessageDecls(msgd *dpb.DescriptorProto, reservedNames *[]*stringLiteralNode, decls []*messageElement, isProto3 bool) {
+	for _, decl := range decls {
+		if decl.enum != nil {
+			msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl.enum))
+		} else if decl.extend != nil {
+			r.addExtensions(decl.extend, &msgd.Extension, &msgd.NestedType, isProto3)
+		} else if decl.extensionRange != nil {
+			msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl.extensionRange)...)
+		} else if decl.field != nil {
+			msgd.Field = append(msgd.Field, r.asFieldDescriptor(decl.field))
+		} else if decl.mapField != nil {
+			fd, md := r.asMapDescriptors(decl.mapField, isProto3)
+			msgd.Field = append(msgd.Field, fd)
+			msgd.NestedType = append(msgd.NestedType, md)
+		} else if decl.group != nil {
+			fd, md := r.asGroupDescriptors(decl.group, isProto3)
+			msgd.Field = append(msgd.Field, fd)
+			msgd.NestedType = append(msgd.NestedType, md)
+		} else if decl.oneOf != nil {
+			oodIndex := len(msgd.OneofDecl)
+			ood := &dpb.OneofDescriptorProto{Name: proto.String(decl.oneOf.name.val)}
+			r.putOneOfNode(ood, decl.oneOf)
+			msgd.OneofDecl = append(msgd.OneofDecl, ood)
+			for _, oodecl := range decl.oneOf.decls {
+				if oodecl.option != nil {
+					if ood.Options == nil {
+						ood.Options = &dpb.OneofOptions{}
+					}
+					ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl.option))
+				} else if oodecl.field != nil {
+					fd := r.asFieldDescriptor(oodecl.field)
+					fd.OneofIndex = proto.Int32(int32(oodIndex))
+					msgd.Field = append(msgd.Field, fd)
+				}
+			}
+		} else if decl.option != nil {
+			if msgd.Options == nil {
+				msgd.Options = &dpb.MessageOptions{}
+			}
+			msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+		} else if decl.nested != nil {
+			msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl.nested, isProto3))
+		} else if decl.reserved != nil {
+			for _, n := range decl.reserved.names {
+				*reservedNames = append(*reservedNames, n)
+				msgd.ReservedName = append(msgd.ReservedName, n.val)
+			}
+			for _, rng := range decl.reserved.ranges {
+				msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng))
+			}
+		}
+	}
+}
+
+func (r *parseResult) asMessageReservedRange(rng *rangeNode) *dpb.DescriptorProto_ReservedRange {
+	rr := &dpb.DescriptorProto_ReservedRange{
+		Start: proto.Int32(rng.st),
+		End:   proto.Int32(rng.en + 1),
+	}
+	r.putMessageReservedRangeNode(rr, rng)
+	return rr
+}
+
+func (r *parseResult) asServiceDescriptor(svc *serviceNode) *dpb.ServiceDescriptorProto {
+	sd := &dpb.ServiceDescriptorProto{Name: proto.String(svc.name.val)}
+	r.putServiceNode(sd, svc)
+	for _, decl := range svc.decls {
+		if decl.option != nil {
+			if sd.Options == nil {
+				sd.Options = &dpb.ServiceOptions{}
+			}
+			sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(decl.option))
+		} else if decl.rpc != nil {
+			sd.Method = append(sd.Method, r.asMethodDescriptor(decl.rpc))
+		}
+	}
+	return sd
+}
+
+func toNameParts(ident *identNode, offset int) []*optionNamePartNode {
+	parts := strings.Split(ident.val[offset:], ".")
+	ret := make([]*optionNamePartNode, len(parts))
+	for i, p := range parts {
+		ret[i] = &optionNamePartNode{text: ident, offset: offset, length: len(p)}
+		ret[i].setRange(ident, ident)
+		offset += len(p) + 1
+	}
+	return ret
+}
+
+func checkUint64InInt32Range(lex protoLexer, pos *SourcePos, v uint64) {
+	if v > math.MaxInt32 {
+		lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
+	}
+}
+
+func checkInt64InInt32Range(lex protoLexer, pos *SourcePos, v int64) {
+	if v > math.MaxInt32 || v < math.MinInt32 {
+		lexError(lex, pos, fmt.Sprintf("constant %d is out of range for int32 (%d to %d)", v, math.MinInt32, math.MaxInt32))
+	}
+}
+
+func checkTag(lex protoLexer, pos *SourcePos, v uint64) {
+	if v > internal.MaxTag {
+		lexError(lex, pos, fmt.Sprintf("tag number %d is higher than max allowed tag number (%d)", v, internal.MaxTag))
+	} else if v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd {
+		lexError(lex, pos, fmt.Sprintf("tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd))
+	}
+}
+
+func aggToString(agg []*aggregateEntryNode, buf *bytes.Buffer) {
+	buf.WriteString("{")
+	for _, a := range agg {
+		buf.WriteString(" ")
+		buf.WriteString(a.name.value())
+		if v, ok := a.val.(*aggregateLiteralNode); ok {
+			aggToString(v.elements, buf)
+		} else {
+			buf.WriteString(": ")
+			elementToString(a.val.value(), buf)
+		}
+	}
+	buf.WriteString(" }")
+}
+
+func elementToString(v interface{}, buf *bytes.Buffer) {
+	switch v := v.(type) {
+	case bool, int64, uint64, identifier:
+		fmt.Fprintf(buf, "%v", v)
+	case float64:
+		if math.IsInf(v, 1) {
+			buf.WriteString(": inf")
+		} else if math.IsInf(v, -1) {
+			buf.WriteString(": -inf")
+		} else if math.IsNaN(v) {
+			buf.WriteString(": nan")
+		} else {
+			fmt.Fprintf(buf, ": %v", v)
+		}
+	case string:
+		buf.WriteRune('"')
+		writeEscapedBytes(buf, []byte(v))
+		buf.WriteRune('"')
+	case []valueNode:
+		buf.WriteString(": [")
+		first := true
+		for _, e := range v {
+			if first {
+				first = false
+			} else {
+				buf.WriteString(", ")
+			}
+			elementToString(e.value(), buf)
+		}
+		buf.WriteString("]")
+	case []*aggregateEntryNode:
+		aggToString(v, buf)
+	}
+}
+
+func writeEscapedBytes(buf *bytes.Buffer, b []byte) {
+	for _, c := range b {
+		switch c {
+		case '\n':
+			buf.WriteString("\\n")
+		case '\r':
+			buf.WriteString("\\r")
+		case '\t':
+			buf.WriteString("\\t")
+		case '"':
+			buf.WriteString("\\\"")
+		case '\'':
+			buf.WriteString("\\'")
+		case '\\':
+			buf.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c <= 0x7f && c != '"' && c != '\\' {
+				// simple printable characters
+				buf.WriteByte(c)
+			} else {
+				// use octal escape for all other values
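+				// e.g. (illustrative) byte 0x0b becomes the three-digit octal escape \013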
+				buf.WriteRune('\\')
+				buf.WriteByte('0' + ((c >> 6) & 0x7))
+				buf.WriteByte('0' + ((c >> 3) & 0x7))
+				buf.WriteByte('0' + (c & 0x7))
+			}
+		}
+	}
+}
+
+func basicValidate(res *parseResult) error {
+	fd := res.fd
+	isProto3 := fd.GetSyntax() == "proto3"
+
+	for _, md := range fd.MessageType {
+		if err := validateMessage(res, isProto3, "", md); err != nil {
+			return err
+		}
+	}
+
+	for _, ed := range fd.EnumType {
+		if err := validateEnum(res, isProto3, "", ed); err != nil {
+			return err
+		}
+	}
+
+	for _, fld := range fd.Extension {
+		if err := validateField(res, isProto3, "", fld); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func validateMessage(res *parseResult, isProto3 bool, prefix string, md *dpb.DescriptorProto) error {
+	nextPrefix := md.GetName() + "."
+
+	for _, fld := range md.Field {
+		if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, fld := range md.Extension {
+		if err := validateField(res, isProto3, nextPrefix, fld); err != nil {
+			return err
+		}
+	}
+	for _, ed := range md.EnumType {
+		if err := validateEnum(res, isProto3, nextPrefix, ed); err != nil {
+			return err
+		}
+	}
+	for _, nmd := range md.NestedType {
+		if err := validateMessage(res, isProto3, nextPrefix, nmd); err != nil {
+			return err
+		}
+	}
+
+	scope := fmt.Sprintf("message %s%s", prefix, md.GetName())
+
+	if isProto3 && len(md.ExtensionRange) > 0 {
+		n := res.getExtensionRangeNode(md.ExtensionRange[0])
+		return ErrorWithSourcePos{Pos: n.start(), Underlying: fmt.Errorf("%s: extension ranges are not allowed in proto3", scope)}
+	}
+
+	if index, err := findOption(res, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil {
+		return err
+	} else if index >= 0 {
+		opt := md.Options.UninterpretedOption[index]
+		optn := res.getOptionNode(opt)
+		md.Options.UninterpretedOption = removeOption(md.Options.UninterpretedOption, index)
+		valid := false
+		if opt.IdentifierValue != nil {
+			if opt.GetIdentifierValue() == "true" {
+				return ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: map_entry option should not be set explicitly; use map type instead", scope)}
+			} else if opt.GetIdentifierValue() == "false" {
+				md.Options.MapEntry = proto.Bool(false)
+				valid = true
+			}
+		}
+		if !valid {
+			return ErrorWithSourcePos{Pos: optn.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for map_entry option", scope)}
+		}
+	}
+
+	// reserved ranges should not overlap
+	rsvd := make(tagRanges, len(md.ReservedRange))
+	for i, r := range md.ReservedRange {
+		n := res.getMessageReservedRangeNode(r)
+		rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+
+	}
+	sort.Sort(rsvd)
+	for i := 1; i < len(rsvd); i++ {
+		if rsvd[i].start < rsvd[i-1].end {
+			return ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1)}
+		}
+	}
+
+	// extension ranges should not overlap
+	exts := make(tagRanges, len(md.ExtensionRange))
+	for i, r := range md.ExtensionRange {
+		n := res.getExtensionRangeNode(r)
+		exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+	}
+	sort.Sort(exts)
+	for i := 1; i < len(exts); i++ {
+		if exts[i].start < exts[i-1].end {
+			return ErrorWithSourcePos{Pos: exts[i].node.start(), Underlying: fmt.Errorf("%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1)}
+		}
+	}
+
+	// see if any extension range overlaps any reserved range
+	var i, j int // i indexes rsvd; j indexes exts
+	for i < len(rsvd) && j < len(exts) {
+		if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end ||
+			exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end {
+
+			var pos *SourcePos
+			if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end {
+				pos = rsvd[i].node.start()
+			} else {
+				pos = exts[j].node.start()
+			}
+			// ranges overlap
+			return ErrorWithSourcePos{Pos: pos, Underlying: fmt.Errorf("%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1)}
+		}
+		if rsvd[i].start < exts[j].start {
+			i++
+		} else {
+			j++
+		}
+	}
+
+	// now, check that fields don't re-use tags and don't try to use extension
+	// or reserved ranges or reserved names
+	rsvdNames := map[string]struct{}{}
+	for _, n := range md.ReservedName {
+		rsvdNames[n] = struct{}{}
+	}
+	fieldTags := map[int32]string{}
+	for _, fld := range md.Field {
+		fn := res.getFieldNode(fld)
+		if _, ok := rsvdNames[fld.GetName()]; ok {
+			return ErrorWithSourcePos{Pos: fn.fieldName().start(), Underlying: fmt.Errorf("%s: field %s is using a reserved name", scope, fld.GetName())}
+		}
+		if existing := fieldTags[fld.GetNumber()]; existing != "" {
+			return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber())}
+		}
+		fieldTags[fld.GetNumber()] = fld.GetName()
+		// check reserved ranges
+		r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() })
+		if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() {
+			return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1)}
+		}
+		// and check extension ranges
+		e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() })
+		if e < len(exts) && exts[e].start <= fld.GetNumber() {
+			return ErrorWithSourcePos{Pos: fn.fieldTag().start(), Underlying: fmt.Errorf("%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1)}
+		}
+	}
+
+	return nil
+}
+
+func validateEnum(res *parseResult, isProto3 bool, prefix string, ed *dpb.EnumDescriptorProto) error {
+	scope := fmt.Sprintf("enum %s%s", prefix, ed.GetName())
+
+	if index, err := findOption(res, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil {
+		return err
+	} else if index >= 0 {
+		opt := ed.Options.UninterpretedOption[index]
+		ed.Options.UninterpretedOption = removeOption(ed.Options.UninterpretedOption, index)
+		valid := false
+		if opt.IdentifierValue != nil {
+			if opt.GetIdentifierValue() == "true" {
+				ed.Options.AllowAlias = proto.Bool(true)
+				valid = true
+			} else if opt.GetIdentifierValue() == "false" {
+				ed.Options.AllowAlias = proto.Bool(false)
+				valid = true
+			}
+		}
+		if !valid {
+			optNode := res.getOptionNode(opt)
+			return ErrorWithSourcePos{Pos: optNode.getValue().start(), Underlying: fmt.Errorf("%s: expecting bool value for allow_alias option", scope)}
+		}
+	}
+
+	if isProto3 && ed.Value[0].GetNumber() != 0 {
+		evNode := res.getEnumValueNode(ed.Value[0])
+		return ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: proto3 requires that first value in enum have numeric value of 0", scope)}
+	}
+
+	if !ed.Options.GetAllowAlias() {
+		// make sure all value numbers are distinct
+		vals := map[int32]string{}
+		for _, evd := range ed.Value {
+			if existing := vals[evd.GetNumber()]; existing != "" {
+				evNode := res.getEnumValueNode(evd)
+				return ErrorWithSourcePos{Pos: evNode.getNumber().start(), Underlying: fmt.Errorf("%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber())}
+			}
+			vals[evd.GetNumber()] = evd.GetName()
+		}
+	}
+
+	// reserved ranges should not overlap
+	rsvd := make(tagRanges, len(ed.ReservedRange))
+	for i, r := range ed.ReservedRange {
+		n := res.getEnumReservedRangeNode(r)
+		rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
+	}
+	sort.Sort(rsvd)
+	for i := 1; i < len(rsvd); i++ {
+		if rsvd[i].start <= rsvd[i-1].end {
+			return ErrorWithSourcePos{Pos: rsvd[i].node.start(), Underlying: fmt.Errorf("%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end)}
+		}
+	}
+
+	// now, check that enum values don't use reserved names or numbers that
+	// fall in reserved ranges
+	rsvdNames := map[string]struct{}{}
+	for _, n := range ed.ReservedName {
+		rsvdNames[n] = struct{}{}
+	}
+	for _, ev := range ed.Value {
+		evn := res.getEnumValueNode(ev)
+		if _, ok := rsvdNames[ev.GetName()]; ok {
+			return ErrorWithSourcePos{Pos: evn.getName().start(), Underlying: fmt.Errorf("%s: value %s is using a reserved name", scope, ev.GetName())}
+		}
+		// check reserved ranges
+		r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() })
+		if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() {
+			return ErrorWithSourcePos{Pos: evn.getNumber().start(), Underlying: fmt.Errorf("%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end)}
+		}
+	}
+
+	return nil
+}
+
+func validateField(res *parseResult, isProto3 bool, prefix string, fld *dpb.FieldDescriptorProto) error {
+	scope := fmt.Sprintf("field %s%s", prefix, fld.GetName())
+
+	node := res.getFieldNode(fld)
+	if isProto3 {
+		if fld.GetType() == dpb.FieldDescriptorProto_TYPE_GROUP {
+			n := node.(*groupNode)
+			return ErrorWithSourcePos{Pos: n.groupKeyword.start(), Underlying: fmt.Errorf("%s: groups are not allowed in proto3", scope)}
+		}
+		if fld.Label != nil && fld.GetLabel() != dpb.FieldDescriptorProto_LABEL_REPEATED {
+			return ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: field has label %v, but proto3 should omit labels other than 'repeated'", scope, fld.GetLabel())}
+		}
+		if index, err := findOption(res, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil {
+			return err
+		} else if index >= 0 {
+			optNode := res.getOptionNode(fld.Options.GetUninterpretedOption()[index])
+			return ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: default values are not allowed in proto3", scope)}
+		}
+	} else {
+		if fld.Label == nil && fld.OneofIndex == nil {
+			return ErrorWithSourcePos{Pos: node.fieldName().start(), Underlying: fmt.Errorf("%s: field has no label, but proto2 must indicate 'optional' or 'required'", scope)}
+		}
+		if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == dpb.FieldDescriptorProto_LABEL_REQUIRED {
+			return ErrorWithSourcePos{Pos: node.fieldLabel().start(), Underlying: fmt.Errorf("%s: extension fields cannot be 'required'", scope)}
+		}
+	}
+
+	// finally, set any missing label to optional
+	if fld.Label == nil {
+		fld.Label = dpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
+	}
+	return nil
+}
+
+func findOption(res *parseResult, scope string, opts []*dpb.UninterpretedOption, name string) (int, error) {
+	found := -1
+	for i, opt := range opts {
+		if len(opt.Name) != 1 {
+			continue
+		}
+		if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name {
+			continue
+		}
+		if found >= 0 {
+			optNode := res.getOptionNode(opt)
+			return -1, ErrorWithSourcePos{Pos: optNode.getName().start(), Underlying: fmt.Errorf("%s: option %s cannot be defined more than once", scope, name)}
+		}
+		found = i
+	}
+	return found, nil
+}
+
+func removeOption(uo []*dpb.UninterpretedOption, indexToRemove int) []*dpb.UninterpretedOption {
+	if indexToRemove == 0 {
+		return uo[1:]
+	} else if int(indexToRemove) == len(uo)-1 {
+		return uo[:len(uo)-1]
+	} else {
+		return append(uo[:indexToRemove], uo[indexToRemove+1:]...)
+	}
+}
+
+type tagRange struct {
+	start int32
+	end   int32
+	node  rangeDecl
+}
+
+type tagRanges []tagRange
+
+func (r tagRanges) Len() int {
+	return len(r)
+}
+
+func (r tagRanges) Less(i, j int) bool {
+	return r[i].start < r[j].start ||
+		(r[i].start == r[j].start && r[i].end < r[j].end)
+}
+
+func (r tagRanges) Swap(i, j int) {
+	r[i], r[j] = r[j], r[i]
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
new file mode 100644
index 0000000..faf49d9
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y
@@ -0,0 +1,937 @@
+%{
+package protoparse
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+	"fmt"
+	"math"
+	"unicode"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+%}
+
+// fields inside this union end up as the fields in a structure known
+// as ${PREFIX}SymType, a reference to which is passed to the lexer.
+%union{
+	file      *fileNode
+	fileDecls []*fileElement
+	syn       *syntaxNode
+	pkg       *packageNode
+	imprt     *importNode
+	msg       *messageNode
+	msgDecls  []*messageElement
+	fld       *fieldNode
+	mapFld    *mapFieldNode
+	grp       *groupNode
+	oo        *oneOfNode
+	ooDecls   []*oneOfElement
+	ext       *extensionRangeNode
+	resvd     *reservedNode
+	en        *enumNode
+	enDecls   []*enumElement
+	env       *enumValueNode
+	extend    *extendNode
+	extDecls  []*extendElement
+	svc       *serviceNode
+	svcDecls  []*serviceElement
+	mtd       *methodNode
+	rpcType   *rpcTypeNode
+	opts      []*optionNode
+	optNm     []*optionNamePartNode
+	rngs      []*rangeNode
+	names     []*stringLiteralNode
+	sl        []valueNode
+	agg       []*aggregateEntryNode
+	aggName   *aggregateNameNode
+	v         valueNode
+	str       *stringLiteralNode
+	i         *negativeIntLiteralNode
+	ui        *intLiteralNode
+	f         *floatLiteralNode
+	id        *identNode
+	b         *basicNode
+	err       error
+}
+
+// any non-terminal which returns a value needs a type, which is
+// really a field name in the above union struct
+%type <file>      file
+%type <syn>       syntax
+%type <fileDecls> fileDecl fileDecls
+%type <imprt>     import
+%type <pkg>       package
+%type <opts>      option fieldOption fieldOptions rpcOption rpcOptions
+%type <optNm>     optionName optionNameRest optionNameComponent
+%type <v>         constant scalarConstant aggregate
+%type <id>        name ident typeIdent keyType
+%type <aggName>   aggName
+%type <i>         negIntLit
+%type <ui>        intLit
+%type <f>         floatLit
+%type <sl>        constantList
+%type <agg>       aggFields aggField aggFieldEntry
+%type <fld>       field oneofField
+%type <oo>        oneof
+%type <grp>       group
+%type <mapFld>    mapField
+%type <msg>       message
+%type <msgDecls>  messageItem messageBody
+%type <ooDecls>   oneofItem oneofBody
+%type <names>     fieldNames
+%type <resvd>     msgReserved enumReserved reservedNames
+%type <rngs>      tagRange tagRanges enumRange enumRanges
+%type <ext>       extensions
+%type <en>        enum
+%type <enDecls>   enumItem enumBody
+%type <env>       enumField
+%type <extend>    extend
+%type <extDecls>  extendItem extendBody
+%type <str>       stringLit
+%type <svc>       service
+%type <svcDecls>  serviceItem serviceBody
+%type <mtd>       rpc
+%type <rpcType>   rpcType
+
+// same for terminals
+%token <str> _STRING_LIT
+%token <ui>  _INT_LIT
+%token <f>   _FLOAT_LIT
+%token <id>  _NAME _FQNAME _TYPENAME
+%token <id>  _SYNTAX _IMPORT _WEAK _PUBLIC _PACKAGE _OPTION _TRUE _FALSE _INF _NAN _REPEATED _OPTIONAL _REQUIRED
+%token <id>  _DOUBLE _FLOAT _INT32 _INT64 _UINT32 _UINT64 _SINT32 _SINT64 _FIXED32 _FIXED64 _SFIXED32 _SFIXED64
+%token <id>  _BOOL _STRING _BYTES _GROUP _ONEOF _MAP _EXTENSIONS _TO _MAX _RESERVED _ENUM _MESSAGE _EXTEND
+%token <id>  _SERVICE _RPC _STREAM _RETURNS
+%token <err> _ERROR
+// we define all of these, even ones that aren't used, to improve error messages
+// so it shows the unexpected symbol instead of showing "$unk"
+%token <b>   '=' ';' ':' '{' '}' '\\' '/' '?' '.' ',' '>' '<' '+' '-' '(' ')' '[' ']' '*' '&' '^' '%' '$' '#' '@' '!' '~' '`'
+
+%%
+
+file : syntax {
+		$$ = &fileNode{syntax: $1}
+		$$.setRange($1, $1)
+		protolex.(*protoLex).res = $$
+	}
+	| fileDecls  {
+		$$ = &fileNode{decls: $1}
+		if len($1) > 0 {
+			$$.setRange($1[0], $1[len($1)-1])
+		}
+		protolex.(*protoLex).res = $$
+	}
+	| syntax fileDecls {
+		$$ = &fileNode{syntax: $1, decls: $2}
+		var end node
+		if len($2) > 0 {
+			end = $2[len($2)-1]
+		} else {
+			end = $1
+		}
+		$$.setRange($1, end)
+		protolex.(*protoLex).res = $$
+	}
+	| {
+	}
+
+fileDecls : fileDecls fileDecl {
+		$$ = append($1, $2...)
+	}
+	| fileDecl
+
+fileDecl : import {
+		$$ = []*fileElement{{imp: $1}}
+	}
+	| package {
+		$$ = []*fileElement{{pkg: $1}}
+	}
+	| option {
+		$$ = []*fileElement{{option: $1[0]}}
+	}
+	| message {
+		$$ = []*fileElement{{message: $1}}
+	}
+	| enum {
+		$$ = []*fileElement{{enum: $1}}
+	}
+	| extend {
+		$$ = []*fileElement{{extend: $1}}
+	}
+	| service {
+		$$ = []*fileElement{{service: $1}}
+	}
+	| ';' {
+		$$ = []*fileElement{{empty: $1}}
+	}
+
+syntax : _SYNTAX '=' stringLit ';' {
+		if $3.val != "proto2" && $3.val != "proto3" {
+			lexError(protolex, $3.start(), "syntax value must be 'proto2' or 'proto3'")
+		}
+		$$ = &syntaxNode{syntax: $3}
+		$$.setRange($1, $4)
+	}
+
+import : _IMPORT stringLit ';' {
+		$$ = &importNode{ name: $2 }
+		$$.setRange($1, $3)
+	}
+	| _IMPORT _WEAK stringLit ';' {
+		$$ = &importNode{ name: $3, weak: true }
+		$$.setRange($1, $4)
+	}
+	| _IMPORT _PUBLIC stringLit ';' {
+		$$ = &importNode{ name: $3, public: true }
+		$$.setRange($1, $4)
+	}
+
+package : _PACKAGE ident ';' {
+		$$ = &packageNode{name: $2}
+		$$.setRange($1, $3)
+	}
+
+ident : name
+	| _FQNAME
+
+option : _OPTION optionName '=' constant ';' {
+		n := &optionNameNode{parts: $2}
+		n.setRange($2[0], $2[len($2)-1])
+		o := &optionNode{name: n, val: $4}
+		o.setRange($1, $5)
+		$$ = []*optionNode{o}
+	}
+
+optionName : ident {
+		$$ = toNameParts($1, 0)
+	}
+	| '(' typeIdent ')' {
+		p := &optionNamePartNode{text: $2, isExtension: true}
+		p.setRange($1, $3)
+		$$ = []*optionNamePartNode{p}
+	}
+	| '(' typeIdent ')' optionNameRest {
+		p := &optionNamePartNode{text: $2, isExtension: true}
+		p.setRange($1, $3)
+		ps := make([]*optionNamePartNode, 1, len($4)+1)
+		ps[0] = p
+		$$ = append(ps, $4...)
+	}
+
+optionNameRest : optionNameComponent
+	| optionNameComponent optionNameRest {
+		$$ = append($1, $2...)
+	}
+
+optionNameComponent : _TYPENAME {
+		$$ = toNameParts($1, 1 /* exclude leading dot */)
+	}
+	| '.' '(' typeIdent ')' {
+		p := &optionNamePartNode{text: $3, isExtension: true}
+		p.setRange($2, $4)
+		$$ = []*optionNamePartNode{p}
+	}
+
+constant : scalarConstant
+	| aggregate
+
+scalarConstant : stringLit {
+		$$ = $1
+	}
+	| intLit {
+		$$ = $1
+	}
+	| negIntLit {
+		$$ = $1
+	}
+	| floatLit {
+		$$ = $1
+	}
+	| name {
+		if $1.val == "true" {
+			$$ = &boolLiteralNode{basicNode: $1.basicNode, val: true}
+		} else if $1.val == "false" {
+			$$ = &boolLiteralNode{basicNode: $1.basicNode, val: false}
+		} else if $1.val == "inf" {
+			f := &floatLiteralNode{val: math.Inf(1)}
+			f.setRange($1, $1)
+			$$ = f
+		} else if $1.val == "nan" {
+			f := &floatLiteralNode{val: math.NaN()}
+			f.setRange($1, $1)
+			$$ = f
+		} else {
+			$$ = $1
+		}
+	}
+
+intLit : _INT_LIT
+	| '+' _INT_LIT {
+		$$ = $2
+	}
+
+negIntLit : '-' _INT_LIT {
+		if $2.val > math.MaxInt64 + 1 {
+			lexError(protolex, $2.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", $2.val, int64(math.MinInt64), int64(math.MaxInt64)))
+		}
+		$$ = &negativeIntLiteralNode{val: -int64($2.val)}
+		$$.setRange($1, $2)
+	}
+
+floatLit : _FLOAT_LIT
+	| '-' _FLOAT_LIT {
+		$$ = &floatLiteralNode{val: -$2.val}
+		$$.setRange($1, $2)
+	}
+	| '+' _FLOAT_LIT {
+		$$ = &floatLiteralNode{val: $2.val}
+		$$.setRange($1, $2)
+	}
+	| '+' _INF {
+		$$ = &floatLiteralNode{val: math.Inf(1)}
+		$$.setRange($1, $2)
+	}
+	| '-' _INF {
+		$$ = &floatLiteralNode{val: math.Inf(-1)}
+		$$.setRange($1, $2)
+	}
+
+stringLit : _STRING_LIT
+    | stringLit _STRING_LIT {
+        $$ = &stringLiteralNode{val: $1.val + $2.val}
+        $$.setRange($1, $2)
+    }
+
+aggregate : '{' aggFields '}' {
+		a := &aggregateLiteralNode{elements: $2}
+		a.setRange($1, $3)
+		$$ = a
+	}
+
+aggFields : aggField
+	| aggFields aggField {
+		$$ = append($1, $2...)
+	}
+	| {
+		$$ = nil
+	}
+
+aggField : aggFieldEntry
+	| aggFieldEntry ',' {
+		$$ = $1
+	}
+	| aggFieldEntry ';' {
+		$$ = $1
+	}
+
+aggFieldEntry : aggName ':' scalarConstant {
+		a := &aggregateEntryNode{name: $1, val: $3}
+		a.setRange($1, $3)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName ':' '[' ']' {
+		s := &sliceLiteralNode{}
+		s.setRange($3, $4)
+		a := &aggregateEntryNode{name: $1, val: s}
+		a.setRange($1, $4)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName ':' '[' constantList ']' {
+		s := &sliceLiteralNode{elements: $4}
+		s.setRange($3, $5)
+		a := &aggregateEntryNode{name: $1, val: s}
+		a.setRange($1, $5)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName ':' aggregate {
+		a := &aggregateEntryNode{name: $1, val: $3}
+		a.setRange($1, $3)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName aggregate {
+		a := &aggregateEntryNode{name: $1, val: $2}
+		a.setRange($1, $2)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName ':' '<' aggFields '>' {
+		s := &aggregateLiteralNode{elements: $4}
+		s.setRange($3, $5)
+		a := &aggregateEntryNode{name: $1, val: s}
+		a.setRange($1, $5)
+		$$ = []*aggregateEntryNode{a}
+	}
+	| aggName '<' aggFields '>' {
+		s := &aggregateLiteralNode{elements: $3}
+		s.setRange($2, $4)
+		a := &aggregateEntryNode{name: $1, val: s}
+		a.setRange($1, $4)
+		$$ = []*aggregateEntryNode{a}
+	}
+
+aggName : name {
+		$$ = &aggregateNameNode{name: $1}
+		$$.setRange($1, $1)
+	}
+	| '[' ident ']' {
+		$$ = &aggregateNameNode{name: $2, isExtension: true}
+		$$.setRange($1, $3)
+	}
+
+constantList : constant {
+		$$ = []valueNode{$1}
+	}
+	| constantList ',' constant {
+		$$ = append($1, $3)
+	}
+	| constantList ';' constant {
+		$$ = append($1, $3)
+	}
+	| '<' aggFields '>' {
+		s := &aggregateLiteralNode{elements: $2}
+		s.setRange($1, $3)
+		$$ = []valueNode{s}
+	}
+	| constantList ','  '<' aggFields '>' {
+		s := &aggregateLiteralNode{elements: $4}
+		s.setRange($3, $5)
+		$$ = append($1, s)
+	}
+	| constantList ';'  '<' aggFields '>' {
+		s := &aggregateLiteralNode{elements: $4}
+		s.setRange($3, $5)
+		$$ = append($1, s)
+	}
+
+typeIdent : ident
+	| _TYPENAME
+
+field : _REQUIRED typeIdent name '=' _INT_LIT ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode, required: true}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+		$$.setRange($1, $6)
+	}
+	| _OPTIONAL typeIdent name '=' _INT_LIT ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+		$$.setRange($1, $6)
+	}
+	| _REPEATED typeIdent name '=' _INT_LIT ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5}
+		$$.setRange($1, $6)
+	}
+	| typeIdent name '=' _INT_LIT ';' {
+		checkTag(protolex, $4.start(), $4.val)
+		$$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+		$$.setRange($1, $5)
+	}
+	| _REQUIRED typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode, required: true}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+		$$.setRange($1, $9)
+	}
+	| _OPTIONAL typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+		$$.setRange($1, $9)
+	}
+	| _REPEATED typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $5.start(), $5.val)
+		lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+		$$ = &fieldNode{label: lbl, fldType: $2, name: $3, tag: $5, options: $7}
+		$$.setRange($1, $9)
+	}
+	| typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $4.start(), $4.val)
+		$$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $6}
+		$$.setRange($1, $8)
+	}
+
+fieldOptions : fieldOptions ',' fieldOption {
+		$$ = append($1, $3...)
+	}
+	| fieldOption
+
+fieldOption: optionName '=' constant {
+		n := &optionNameNode{parts: $1}
+		n.setRange($1[0], $1[len($1)-1])
+		o := &optionNode{name: n, val: $3}
+		o.setRange($1[0], $3)
+		$$ = []*optionNode{o}
+	}
+
+group : _REQUIRED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+		checkTag(protolex, $5.start(), $5.val)
+		if !unicode.IsUpper(rune($3.val[0])) {
+			lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+		}
+		lbl := &labelNode{basicNode: $1.basicNode, required: true}
+		$$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+		$$.setRange($1, $8)
+	}
+	| _OPTIONAL _GROUP name '=' _INT_LIT '{' messageBody '}' {
+		checkTag(protolex, $5.start(), $5.val)
+		if !unicode.IsUpper(rune($3.val[0])) {
+			lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+		}
+		lbl := &labelNode{basicNode: $1.basicNode}
+		$$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+		$$.setRange($1, $8)
+	}
+	| _REPEATED _GROUP name '=' _INT_LIT '{' messageBody '}' {
+		checkTag(protolex, $5.start(), $5.val)
+		if !unicode.IsUpper(rune($3.val[0])) {
+			lexError(protolex, $3.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", $3.val))
+		}
+		lbl := &labelNode{basicNode: $1.basicNode, repeated: true}
+		$$ = &groupNode{groupKeyword: $2, label: lbl, name: $3, tag: $5, decls: $7}
+		$$.setRange($1, $8)
+	}
+
+oneof : _ONEOF name '{' oneofBody '}' {
+		c := 0
+		for _, el := range $4 {
+			if el.field != nil {
+				c++
+			}
+		}
+		if c == 0 {
+			lexError(protolex, $1.start(), "oneof must contain at least one field")
+		}
+		$$ = &oneOfNode{name: $2, decls: $4}
+		$$.setRange($1, $5)
+	}
+
+oneofBody : oneofBody oneofItem {
+		$$ = append($1, $2...)
+	}
+	| oneofItem
+	| {
+		$$ = nil
+	}
+
+oneofItem : option {
+		$$ = []*oneOfElement{{option: $1[0]}}
+	}
+	| oneofField {
+		$$ = []*oneOfElement{{field: $1}}
+	}
+	| ';' {
+		$$ = []*oneOfElement{{empty: $1}}
+	}
+
+oneofField : typeIdent name '=' _INT_LIT ';' {
+		checkTag(protolex, $4.start(), $4.val)
+		$$ = &fieldNode{fldType: $1, name: $2, tag: $4}
+		$$.setRange($1, $5)
+	}
+	| typeIdent name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $4.start(), $4.val)
+		$$ = &fieldNode{fldType: $1, name: $2, tag: $4, options: $6}
+		$$.setRange($1, $8)
+	}
+
+mapField : _MAP '<' keyType ',' typeIdent '>' name '=' _INT_LIT ';' {
+		checkTag(protolex, $9.start(), $9.val)
+		$$ = &mapFieldNode{mapKeyword: $1, keyType: $3, valueType: $5, name: $7, tag: $9}
+		$$.setRange($1, $10)
+	}
+	| _MAP '<' keyType ',' typeIdent '>' name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkTag(protolex, $9.start(), $9.val)
+		$$ = &mapFieldNode{mapKeyword: $1, keyType: $3, valueType: $5, name: $7, tag: $9, options: $11}
+		$$.setRange($1, $13)
+	}
+
+keyType : _INT32
+	| _INT64
+	| _UINT32
+	| _UINT64
+	| _SINT32
+	| _SINT64
+	| _FIXED32
+	| _FIXED64
+	| _SFIXED32
+	| _SFIXED64
+	| _BOOL
+	| _STRING
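+
+// Illustrative examples (not part of the upstream grammar file) of map fields
+// accepted by the mapField and keyType rules above; the type and field names
+// are placeholders:
+//
+//   map<string, Project> projects = 3;
+//   map<int32, string> labels = 4 [deprecated = true];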
+
+extensions : _EXTENSIONS tagRanges ';' {
+		$$ = &extensionRangeNode{ranges: $2}
+		$$.setRange($1, $3)
+	}
+	| _EXTENSIONS tagRanges '[' fieldOptions ']' ';' {
+		$$ = &extensionRangeNode{ranges: $2, options: $4}
+		$$.setRange($1, $6)
+	}
+
+tagRanges : tagRanges ',' tagRange {
+		$$ = append($1, $3...)
+	}
+	| tagRange
+
+tagRange : _INT_LIT {
+		if $1.val > internal.MaxTag {
+			lexError(protolex, $1.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+		}
+		r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+		r.setRange($1, $1)
+		$$ = []*rangeNode{r}
+	}
+	| _INT_LIT _TO _INT_LIT {
+		if $1.val > internal.MaxTag {
+			lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+		}
+		if $3.val > internal.MaxTag {
+			lexError(protolex, $3.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", $3.val, internal.MaxTag))
+		}
+		if $1.val > $3.val {
+			lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+		}
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+	| _INT_LIT _TO _MAX {
+		if $1.val > internal.MaxTag {
+			lexError(protolex, $1.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", $1.val, internal.MaxTag))
+		}
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: internal.MaxTag}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
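+
+// Illustrative examples (not part of the upstream grammar file) of extension
+// ranges accepted by the extensions and tagRange rules above:
+//
+//   extensions 100;
+//   extensions 4, 20 to 30;
+//   extensions 1000 to max;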
+
+enumRanges : enumRanges ',' enumRange {
+		$$ = append($1, $3...)
+	}
+	| enumRange
+
+enumRange : _INT_LIT {
+		checkUint64InInt32Range(protolex, $1.start(), $1.val)
+		r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+		r.setRange($1, $1)
+		$$ = []*rangeNode{r}
+	}
+	| negIntLit {
+		checkInt64InInt32Range(protolex, $1.start(), $1.val)
+		r := &rangeNode{stNode: $1, enNode: $1, st: int32($1.val), en: int32($1.val)}
+		r.setRange($1, $1)
+		$$ = []*rangeNode{r}
+	}
+	| _INT_LIT _TO _INT_LIT {
+		checkUint64InInt32Range(protolex, $1.start(), $1.val)
+		checkUint64InInt32Range(protolex, $3.start(), $3.val)
+		if $1.val > $3.val {
+			lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+		}
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+	| negIntLit _TO negIntLit {
+		checkInt64InInt32Range(protolex, $1.start(), $1.val)
+		checkInt64InInt32Range(protolex, $3.start(), $3.val)
+		if $1.val > $3.val {
+			lexError(protolex, $1.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", $1.val, $3.val))
+		}
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+	| negIntLit _TO _INT_LIT {
+		checkInt64InInt32Range(protolex, $1.start(), $1.val)
+		checkUint64InInt32Range(protolex, $3.start(), $3.val)
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: int32($3.val)}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+	| _INT_LIT _TO _MAX {
+		checkUint64InInt32Range(protolex, $1.start(), $1.val)
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: math.MaxInt32}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+	| negIntLit _TO _MAX {
+		checkInt64InInt32Range(protolex, $1.start(), $1.val)
+		r := &rangeNode{stNode: $1, enNode: $3, st: int32($1.val), en: math.MaxInt32}
+		r.setRange($1, $3)
+		$$ = []*rangeNode{r}
+	}
+
+msgReserved : _RESERVED tagRanges ';' {
+		$$ = &reservedNode{ranges: $2}
+		$$.setRange($1, $3)
+	}
+	| reservedNames
+
+enumReserved : _RESERVED enumRanges ';' {
+		$$ = &reservedNode{ranges: $2}
+		$$.setRange($1, $3)
+	}
+	| reservedNames
+
+reservedNames : _RESERVED fieldNames ';' {
+		rsvd := map[string]struct{}{}
+		for _, n := range $2 {
+			if _, ok := rsvd[n.val]; ok {
+				lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+				break
+			}
+			rsvd[n.val] = struct{}{}
+		}
+		$$ = &reservedNode{names: $2}
+		$$.setRange($1, $3)
+	}
+
+fieldNames : fieldNames ',' stringLit {
+		$$ = append($1, $3)
+	}
+	| stringLit {
+		$$ = []*stringLiteralNode{$1}
+	}
+
+enum : _ENUM name '{' enumBody '}' {
+		c := 0
+		for _, el := range $4 {
+			if el.value != nil {
+				c++
+			}
+		}
+		if c == 0 {
+			lexError(protolex, $1.start(), "enums must define at least one value")
+		}
+		$$ = &enumNode{name: $2, decls: $4}
+		$$.setRange($1, $5)
+	}
+
+enumBody : enumBody enumItem {
+		$$ = append($1, $2...)
+	}
+	| enumItem
+	| {
+		$$ = nil
+	}
+
+enumItem : option {
+		$$ = []*enumElement{{option: $1[0]}}
+	}
+	| enumField {
+		$$ = []*enumElement{{value: $1}}
+	}
+	| enumReserved {
+		$$ = []*enumElement{{reserved: $1}}
+	}
+	| ';' {
+		$$ = []*enumElement{{empty: $1}}
+	}
+
+enumField : name '=' _INT_LIT ';' {
+		checkUint64InInt32Range(protolex, $3.start(), $3.val)
+		$$ = &enumValueNode{name: $1, numberP: $3}
+		$$.setRange($1, $4)
+	}
+	| name '=' _INT_LIT '[' fieldOptions ']' ';' {
+		checkUint64InInt32Range(protolex, $3.start(), $3.val)
+		$$ = &enumValueNode{name: $1, numberP: $3, options: $5}
+		$$.setRange($1, $7)
+	}
+	| name '=' negIntLit ';' {
+		checkInt64InInt32Range(protolex, $3.start(), $3.val)
+		$$ = &enumValueNode{name: $1, numberN: $3}
+		$$.setRange($1, $4)
+	}
+	| name '=' negIntLit '[' fieldOptions ']' ';' {
+		checkInt64InInt32Range(protolex, $3.start(), $3.val)
+		$$ = &enumValueNode{name: $1, numberN: $3, options: $5}
+		$$.setRange($1, $7)
+	}
+
+message : _MESSAGE name '{' messageBody '}' {
+		$$ = &messageNode{name: $2, decls: $4}
+		$$.setRange($1, $5)
+	}
+
+messageBody : messageBody messageItem {
+		$$ = append($1, $2...)
+	}
+	| messageItem
+	| {
+		$$ = nil
+	}
+
+messageItem : field {
+		$$ = []*messageElement{{field: $1}}
+	}
+	| enum {
+		$$ = []*messageElement{{enum: $1}}
+	}
+	| message {
+		$$ = []*messageElement{{nested: $1}}
+	}
+	| extend {
+		$$ = []*messageElement{{extend: $1}}
+	}
+	| extensions {
+		$$ = []*messageElement{{extensionRange: $1}}
+	}
+	| group {
+		$$ = []*messageElement{{group: $1}}
+	}
+	| option {
+		$$ = []*messageElement{{option: $1[0]}}
+	}
+	| oneof {
+		$$ = []*messageElement{{oneOf: $1}}
+	}
+	| mapField {
+		$$ = []*messageElement{{mapField: $1}}
+	}
+	| msgReserved {
+		$$ = []*messageElement{{reserved: $1}}
+	}
+	| ';' {
+		$$ = []*messageElement{{empty: $1}}
+	}
+
+extend : _EXTEND typeIdent '{' extendBody '}' {
+		c := 0
+		for _, el := range $4 {
+			if el.field != nil || el.group != nil {
+				c++
+			}
+		}
+		if c == 0 {
+			lexError(protolex, $1.start(), "extend sections must define at least one extension")
+		}
+		$$ = &extendNode{extendee: $2, decls: $4}
+		$$.setRange($1, $5)
+	}
+
+extendBody : extendBody extendItem {
+		$$ = append($1, $2...)
+	}
+	| extendItem
+	| {
+		$$ = nil
+	}
+
+extendItem : field {
+		$$ = []*extendElement{{field: $1}}
+	}
+	| group {
+		$$ = []*extendElement{{group: $1}}
+	}
+	| ';' {
+		$$ = []*extendElement{{empty: $1}}
+	}
+
+service : _SERVICE name '{' serviceBody '}' {
+		$$ = &serviceNode{name: $2, decls: $4}
+		$$.setRange($1, $5)
+	}
+
+serviceBody : serviceBody serviceItem {
+		$$ = append($1, $2...)
+	}
+	| serviceItem
+	| {
+		$$ = nil
+	}
+
+// NB: the published grammar doc suggests support for a "stream" declaration,
+// separate from "rpc", but it does not appear to be supported in protoc (the
+// doc is likely derived from the grammar for a Google-internal version of
+// protoc, which supports streaming Stubby). See the illustrative examples
+// after the rpcType rule below.
+serviceItem : option {
+		$$ = []*serviceElement{{option: $1[0]}}
+	}
+	| rpc {
+		$$ = []*serviceElement{{rpc: $1}}
+	}
+	| ';' {
+		$$ = []*serviceElement{{empty: $1}}
+	}
+
+rpc : _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' ';' {
+		$$ = &methodNode{name: $2, input: $4, output: $8}
+		$$.setRange($1, $10)
+	}
+	| _RPC name '(' rpcType ')' _RETURNS '(' rpcType ')' '{' rpcOptions '}' {
+		$$ = &methodNode{name: $2, input: $4, output: $8, options: $11}
+		$$.setRange($1, $12)
+	}
+
+rpcType : _STREAM typeIdent {
+		$$ = &rpcTypeNode{msgType: $2, streamKeyword: $1}
+		$$.setRange($1, $2)
+	}
+	| typeIdent {
+		$$ = &rpcTypeNode{msgType: $1}
+		$$.setRange($1, $1)
+	}
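+
+// Illustrative examples (not part of the upstream grammar file) of method
+// declarations accepted by the rpc and rpcType rules above; streaming is
+// expressed with the "stream" keyword inside the parentheses rather than as
+// a separate declaration, and the identifiers are placeholders:
+//
+//   rpc Lookup (Request) returns (Response);
+//   rpc Watch (Request) returns (stream Response) {
+//     option deprecated = true;
+//   }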
+
+rpcOptions : rpcOptions rpcOption {
+		$$ = append($1, $2...)
+	}
+	| rpcOption
+	| {
+		$$ = []*optionNode{}
+	}
+
+rpcOption : option {
+		$$ = $1
+	}
+	| ';' {
+		$$ = []*optionNode{}
+	}
+
+name : _NAME
+	| _SYNTAX
+	| _IMPORT
+	| _WEAK
+	| _PUBLIC
+	| _PACKAGE
+	| _OPTION
+	| _TRUE
+	| _FALSE
+	| _INF
+	| _NAN
+	| _REPEATED
+	| _OPTIONAL
+	| _REQUIRED
+	| _DOUBLE
+	| _FLOAT
+	| _INT32
+	| _INT64
+	| _UINT32
+	| _UINT64
+	| _SINT32
+	| _SINT64
+	| _FIXED32
+	| _FIXED64
+	| _SFIXED32
+	| _SFIXED64
+	| _BOOL
+	| _STRING
+	| _BYTES
+	| _GROUP
+	| _ONEOF
+	| _MAP
+	| _EXTENSIONS
+	| _TO
+	| _MAX
+	| _RESERVED
+	| _ENUM
+	| _MESSAGE
+	| _EXTEND
+	| _SERVICE
+	| _RPC
+	| _STREAM
+	| _RETURNS
+
+%%
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
new file mode 100644
index 0000000..6b8a4e6
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/proto.y.go
@@ -0,0 +1,2093 @@
+// Code generated by goyacc -o proto.y.go -p proto proto.y. DO NOT EDIT.
+
+//line proto.y:2
+package protoparse
+
+import __yyfmt__ "fmt"
+
+//line proto.y:2
+
+//lint:file-ignore SA4006 generated parser has unused values
+
+import (
+	"fmt"
+	"math"
+	"unicode"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+//line proto.y:18
+type protoSymType struct {
+	yys       int
+	file      *fileNode
+	fileDecls []*fileElement
+	syn       *syntaxNode
+	pkg       *packageNode
+	imprt     *importNode
+	msg       *messageNode
+	msgDecls  []*messageElement
+	fld       *fieldNode
+	mapFld    *mapFieldNode
+	grp       *groupNode
+	oo        *oneOfNode
+	ooDecls   []*oneOfElement
+	ext       *extensionRangeNode
+	resvd     *reservedNode
+	en        *enumNode
+	enDecls   []*enumElement
+	env       *enumValueNode
+	extend    *extendNode
+	extDecls  []*extendElement
+	svc       *serviceNode
+	svcDecls  []*serviceElement
+	mtd       *methodNode
+	rpcType   *rpcTypeNode
+	opts      []*optionNode
+	optNm     []*optionNamePartNode
+	rngs      []*rangeNode
+	names     []*stringLiteralNode
+	sl        []valueNode
+	agg       []*aggregateEntryNode
+	aggName   *aggregateNameNode
+	v         valueNode
+	str       *stringLiteralNode
+	i         *negativeIntLiteralNode
+	ui        *intLiteralNode
+	f         *floatLiteralNode
+	id        *identNode
+	b         *basicNode
+	err       error
+}
+
+const _STRING_LIT = 57346
+const _INT_LIT = 57347
+const _FLOAT_LIT = 57348
+const _NAME = 57349
+const _FQNAME = 57350
+const _TYPENAME = 57351
+const _SYNTAX = 57352
+const _IMPORT = 57353
+const _WEAK = 57354
+const _PUBLIC = 57355
+const _PACKAGE = 57356
+const _OPTION = 57357
+const _TRUE = 57358
+const _FALSE = 57359
+const _INF = 57360
+const _NAN = 57361
+const _REPEATED = 57362
+const _OPTIONAL = 57363
+const _REQUIRED = 57364
+const _DOUBLE = 57365
+const _FLOAT = 57366
+const _INT32 = 57367
+const _INT64 = 57368
+const _UINT32 = 57369
+const _UINT64 = 57370
+const _SINT32 = 57371
+const _SINT64 = 57372
+const _FIXED32 = 57373
+const _FIXED64 = 57374
+const _SFIXED32 = 57375
+const _SFIXED64 = 57376
+const _BOOL = 57377
+const _STRING = 57378
+const _BYTES = 57379
+const _GROUP = 57380
+const _ONEOF = 57381
+const _MAP = 57382
+const _EXTENSIONS = 57383
+const _TO = 57384
+const _MAX = 57385
+const _RESERVED = 57386
+const _ENUM = 57387
+const _MESSAGE = 57388
+const _EXTEND = 57389
+const _SERVICE = 57390
+const _RPC = 57391
+const _STREAM = 57392
+const _RETURNS = 57393
+const _ERROR = 57394
+
+var protoToknames = [...]string{
+	"$end",
+	"error",
+	"$unk",
+	"_STRING_LIT",
+	"_INT_LIT",
+	"_FLOAT_LIT",
+	"_NAME",
+	"_FQNAME",
+	"_TYPENAME",
+	"_SYNTAX",
+	"_IMPORT",
+	"_WEAK",
+	"_PUBLIC",
+	"_PACKAGE",
+	"_OPTION",
+	"_TRUE",
+	"_FALSE",
+	"_INF",
+	"_NAN",
+	"_REPEATED",
+	"_OPTIONAL",
+	"_REQUIRED",
+	"_DOUBLE",
+	"_FLOAT",
+	"_INT32",
+	"_INT64",
+	"_UINT32",
+	"_UINT64",
+	"_SINT32",
+	"_SINT64",
+	"_FIXED32",
+	"_FIXED64",
+	"_SFIXED32",
+	"_SFIXED64",
+	"_BOOL",
+	"_STRING",
+	"_BYTES",
+	"_GROUP",
+	"_ONEOF",
+	"_MAP",
+	"_EXTENSIONS",
+	"_TO",
+	"_MAX",
+	"_RESERVED",
+	"_ENUM",
+	"_MESSAGE",
+	"_EXTEND",
+	"_SERVICE",
+	"_RPC",
+	"_STREAM",
+	"_RETURNS",
+	"_ERROR",
+	"'='",
+	"';'",
+	"':'",
+	"'{'",
+	"'}'",
+	"'\\\\'",
+	"'/'",
+	"'?'",
+	"'.'",
+	"','",
+	"'>'",
+	"'<'",
+	"'+'",
+	"'-'",
+	"'('",
+	"')'",
+	"'['",
+	"']'",
+	"'*'",
+	"'&'",
+	"'^'",
+	"'%'",
+	"'$'",
+	"'#'",
+	"'@'",
+	"'!'",
+	"'~'",
+	"'`'",
+}
+var protoStatenames = [...]string{}
+
+const protoEofCode = 1
+const protoErrCode = 2
+const protoInitialStackSize = 16
+
+//line proto.y:937
+
+//line yacctab:1
+var protoExca = [...]int{
+	-1, 1,
+	1, -1,
+	-2, 0,
+}
+
+const protoPrivate = 57344
+
+const protoLast = 2050
+
+var protoAct = [...]int{
+
+	120, 8, 288, 8, 8, 386, 264, 80, 128, 113,
+	159, 160, 265, 271, 103, 196, 185, 112, 100, 101,
+	29, 171, 8, 28, 75, 119, 99, 114, 79, 153,
+	137, 148, 266, 184, 24, 139, 306, 255, 77, 78,
+	319, 82, 306, 83, 389, 86, 87, 306, 318, 74,
+	378, 306, 98, 306, 306, 363, 317, 306, 306, 361,
+	306, 359, 351, 222, 379, 338, 337, 366, 307, 328,
+	377, 224, 325, 322, 304, 280, 278, 286, 223, 380,
+	315, 356, 367, 197, 329, 90, 243, 326, 323, 305,
+	281, 279, 297, 140, 111, 154, 27, 197, 249, 214,
+	209, 106, 188, 336, 246, 276, 241, 330, 240, 211,
+	105, 173, 245, 144, 242, 287, 224, 208, 381, 150,
+	382, 149, 176, 146, 327, 207, 324, 163, 16, 226,
+	94, 93, 92, 91, 177, 179, 181, 16, 199, 140,
+	79, 75, 85, 392, 199, 383, 368, 199, 374, 183,
+	78, 77, 373, 154, 16, 187, 191, 372, 199, 144,
+	198, 365, 157, 174, 85, 191, 74, 156, 355, 146,
+	189, 206, 212, 150, 193, 149, 388, 354, 204, 201,
+	163, 210, 203, 14, 333, 158, 15, 16, 157, 85,
+	85, 88, 97, 156, 213, 16, 202, 335, 215, 216,
+	217, 218, 219, 220, 308, 262, 261, 4, 14, 244,
+	260, 15, 16, 376, 96, 259, 258, 18, 17, 19,
+	20, 257, 254, 256, 221, 339, 13, 272, 252, 194,
+	105, 75, 163, 248, 388, 275, 250, 390, 283, 95,
+	84, 267, 18, 17, 19, 20, 89, 23, 247, 225,
+	352, 13, 268, 303, 168, 169, 27, 186, 290, 302,
+	198, 282, 277, 285, 295, 301, 206, 170, 300, 5,
+	299, 272, 105, 22, 163, 163, 284, 117, 11, 275,
+	11, 11, 165, 166, 310, 312, 313, 75, 314, 75,
+	269, 22, 27, 155, 298, 167, 311, 186, 316, 11,
+	25, 26, 263, 168, 293, 320, 85, 206, 27, 152,
+	12, 147, 331, 75, 75, 163, 163, 3, 141, 332,
+	21, 115, 10, 138, 10, 10, 118, 195, 142, 105,
+	345, 75, 206, 347, 75, 123, 349, 75, 190, 105,
+	105, 163, 346, 10, 270, 348, 116, 9, 350, 9,
+	9, 122, 357, 121, 358, 273, 176, 353, 176, 369,
+	176, 334, 163, 161, 163, 290, 292, 104, 9, 206,
+	206, 340, 342, 102, 375, 75, 162, 227, 163, 163,
+	172, 385, 7, 387, 6, 2, 387, 384, 75, 1,
+	0, 391, 27, 107, 110, 31, 0, 0, 32, 33,
+	34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+	0, 0, 0, 0, 106, 0, 0, 0, 0, 0,
+	0, 0, 294, 108, 109, 0, 0, 0, 291, 27,
+	107, 110, 31, 0, 0, 32, 33, 34, 35, 36,
+	37, 38, 39, 40, 41, 42, 43, 44, 45, 46,
+	47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+	57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+	67, 68, 69, 70, 71, 72, 73, 0, 0, 0,
+	0, 106, 0, 0, 0, 0, 0, 0, 0, 253,
+	108, 109, 0, 0, 251, 27, 107, 110, 31, 0,
+	0, 32, 33, 34, 35, 36, 37, 38, 39, 40,
+	41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+	51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
+	61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
+	71, 72, 73, 0, 0, 0, 0, 106, 0, 0,
+	0, 0, 0, 0, 0, 343, 108, 109, 27, 107,
+	110, 31, 0, 0, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	106, 0, 0, 0, 0, 0, 0, 0, 341, 108,
+	109, 27, 107, 110, 31, 0, 0, 32, 33, 34,
+	35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
+	45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+	55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+	65, 66, 67, 68, 69, 70, 71, 72, 73, 0,
+	0, 0, 0, 106, 0, 0, 0, 0, 0, 0,
+	0, 31, 108, 109, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 371, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 370, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 344, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 321, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 296, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 205, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 31, 0, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 228, 229, 230, 231,
+	232, 233, 234, 235, 236, 237, 238, 239, 0, 0,
+	0, 31, 30, 164, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+	58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
+	68, 69, 70, 71, 72, 73, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+	0, 76, 31, 30, 81, 32, 33, 34, 35, 36,
+	133, 38, 39, 40, 41, 127, 126, 125, 45, 46,
+	47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+	57, 58, 59, 60, 134, 135, 132, 64, 65, 136,
+	129, 130, 131, 70, 71, 72, 73, 0, 0, 124,
+	0, 0, 364, 31, 30, 81, 32, 33, 34, 35,
+	36, 133, 38, 39, 40, 41, 127, 126, 125, 45,
+	46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
+	56, 57, 58, 59, 60, 134, 135, 132, 64, 65,
+	136, 129, 130, 131, 70, 71, 72, 73, 0, 0,
+	124, 0, 0, 362, 31, 30, 81, 32, 33, 34,
+	35, 36, 133, 38, 39, 40, 41, 127, 126, 125,
+	45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+	55, 56, 57, 58, 59, 60, 134, 135, 132, 64,
+	65, 136, 129, 130, 131, 70, 71, 72, 73, 0,
+	0, 124, 0, 0, 360, 31, 30, 81, 32, 33,
+	34, 35, 36, 133, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+	0, 0, 274, 0, 0, 309, 31, 30, 81, 32,
+	33, 34, 35, 36, 37, 38, 39, 40, 41, 127,
+	126, 125, 45, 46, 47, 48, 49, 50, 51, 52,
+	53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
+	63, 64, 65, 66, 67, 68, 69, 70, 71, 72,
+	73, 0, 0, 151, 0, 0, 200, 31, 30, 81,
+	32, 33, 34, 35, 36, 133, 38, 39, 40, 41,
+	127, 126, 125, 45, 46, 47, 48, 49, 50, 51,
+	52, 53, 54, 55, 56, 57, 58, 59, 60, 134,
+	135, 132, 64, 65, 136, 129, 130, 131, 70, 71,
+	72, 73, 0, 0, 124, 31, 0, 175, 32, 33,
+	34, 35, 36, 133, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+	64, 65, 145, 67, 68, 69, 70, 71, 72, 73,
+	0, 0, 143, 0, 0, 192, 31, 30, 81, 32,
+	33, 34, 35, 36, 133, 38, 39, 40, 41, 127,
+	126, 125, 45, 46, 47, 48, 49, 50, 51, 52,
+	53, 54, 55, 56, 57, 58, 59, 60, 134, 135,
+	132, 64, 65, 136, 129, 130, 131, 70, 71, 72,
+	73, 0, 0, 124, 31, 30, 81, 32, 33, 34,
+	35, 36, 133, 38, 39, 40, 41, 42, 43, 44,
+	45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+	55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
+	65, 66, 67, 68, 69, 70, 71, 72, 73, 0,
+	0, 274, 31, 30, 81, 32, 33, 34, 35, 36,
+	37, 38, 39, 40, 41, 127, 126, 125, 45, 46,
+	47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
+	57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+	67, 68, 69, 70, 71, 72, 73, 31, 0, 151,
+	32, 33, 34, 35, 36, 133, 38, 39, 40, 41,
+	42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+	52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
+	62, 63, 64, 65, 145, 67, 68, 69, 70, 71,
+	72, 73, 0, 0, 143, 31, 30, 81, 32, 33,
+	34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 289, 73,
+	31, 30, 81, 32, 33, 34, 35, 36, 37, 38,
+	39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+	69, 70, 71, 72, 73, 31, 30, 81, 32, 33,
+	34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 182, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+	31, 30, 81, 32, 33, 34, 35, 36, 37, 38,
+	39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+	59, 180, 61, 62, 63, 64, 65, 66, 67, 68,
+	69, 70, 71, 72, 73, 31, 30, 81, 32, 33,
+	34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 178, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+	31, 30, 0, 32, 33, 34, 35, 36, 37, 38,
+	39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
+	59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
+	69, 70, 71, 72, 73, 31, 0, 0, 32, 33,
+	34, 35, 36, 37, 38, 39, 40, 41, 42, 43,
+	44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
+	54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
+	64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
+}
+var protoPact = [...]int{
+
+	197, -1000, 172, 172, 194, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, 288, 1953, 1124, 1998, 1998, 1773,
+	1998, 172, -1000, 304, 186, 304, 304, -1000, 137, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, 193, -1000, 1773, 77, 76, 75,
+	-1000, -1000, 74, 185, -1000, -1000, 160, 138, -1000, 647,
+	26, 1539, 1680, 1635, 113, -1000, -1000, -1000, 131, -1000,
+	-1000, 302, -1000, -1000, -1000, -1000, 1064, -1000, 277, 249,
+	-1000, 102, 1440, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, 1908, 1863, 1818, 1998, 1998,
+	1998, 1773, 292, 1124, 1998, 38, 252, -1000, 1488, -1000,
+	-1000, -1000, -1000, -1000, 176, 92, -1000, 1389, -1000, -1000,
+	-1000, -1000, 139, -1000, -1000, -1000, -1000, 1998, -1000, 1004,
+	-1000, 63, 45, -1000, 1953, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, 102, -1000, 32, -1000, -1000, 1998, 1998, 1998,
+	1998, 1998, 1998, 171, 9, -1000, 207, 73, 1091, 54,
+	52, 302, -1000, -1000, 81, 50, -1000, 206, 191, 298,
+	-1000, -1000, -1000, -1000, 31, -1000, -1000, -1000, -1000, 455,
+	-1000, 1064, -33, -1000, 1773, 168, 163, 162, 157, 153,
+	152, 297, -1000, 1124, 292, 247, 1587, 43, -1000, -1000,
+	-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+	-1000, -1000, 304, 22, 21, 78, -1000, 233, 72, 1728,
+	-1000, 388, -1000, 1064, 944, -1000, 24, 289, 265, 263,
+	260, 254, 248, 20, -2, -1000, 151, -1000, -1000, -1000,
+	1338, -1000, -1000, -1000, -1000, 1998, 1773, 302, -1000, 1124,
+	-1000, 1124, -1000, -1000, -1000, -1000, -1000, -1000, 12, 1773,
+	-1000, -1000, -14, -1000, 1064, 884, -1000, -1000, 19, 70,
+	18, 68, 15, 51, -1000, 1124, 1124, 130, 647, -1000,
+	-1000, 144, 40, -4, -5, 174, -1000, -1000, 584, 521,
+	824, -1000, -1000, 1124, 1539, -1000, 1124, 1539, -1000, 1124,
+	1539, -8, -1000, -1000, -1000, 245, 1998, 123, 114, 14,
+	-1000, 1064, -1000, 1064, -1000, -9, 1287, -11, 1236, -15,
+	1185, 107, 13, 93, -1000, -1000, 1728, 764, 704, 103,
+	-1000, 98, -1000, 94, -1000, -1000, -1000, 1124, 208, 2,
+	-1000, -1000, -1000, -1000, -1000, -20, 10, 64, 91, -1000,
+	1124, -1000, 122, -1000, -26, 180, -1000, -1000, -1000, 89,
+	-1000, -1000, -1000,
+}
+var protoPgo = [...]int{
+
+	0, 389, 385, 269, 317, 384, 382, 0, 12, 6,
+	5, 381, 32, 21, 380, 52, 26, 18, 20, 7,
+	8, 377, 376, 14, 373, 367, 366, 10, 11, 363,
+	27, 355, 353, 25, 351, 346, 9, 17, 13, 344,
+	338, 335, 328, 30, 16, 33, 15, 327, 326, 321,
+	35, 323, 318, 277, 31, 311, 19, 310, 29, 309,
+	293, 2,
+}
+var protoR1 = [...]int{
+
+	0, 1, 1, 1, 1, 4, 4, 3, 3, 3,
+	3, 3, 3, 3, 3, 2, 5, 5, 5, 6,
+	19, 19, 7, 12, 12, 12, 13, 13, 14, 14,
+	15, 15, 16, 16, 16, 16, 16, 24, 24, 23,
+	25, 25, 25, 25, 25, 56, 56, 17, 27, 27,
+	27, 28, 28, 28, 29, 29, 29, 29, 29, 29,
+	29, 22, 22, 26, 26, 26, 26, 26, 26, 20,
+	20, 30, 30, 30, 30, 30, 30, 30, 30, 9,
+	9, 8, 33, 33, 33, 32, 39, 39, 39, 38,
+	38, 38, 31, 31, 34, 34, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 21, 21, 48, 48,
+	45, 45, 44, 44, 44, 47, 47, 46, 46, 46,
+	46, 46, 46, 46, 41, 41, 42, 42, 43, 40,
+	40, 49, 51, 51, 51, 50, 50, 50, 50, 52,
+	52, 52, 52, 35, 37, 37, 37, 36, 36, 36,
+	36, 36, 36, 36, 36, 36, 36, 36, 53, 55,
+	55, 55, 54, 54, 54, 57, 59, 59, 59, 58,
+	58, 58, 60, 60, 61, 61, 11, 11, 11, 10,
+	10, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+	18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+	18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+	18, 18, 18, 18, 18, 18, 18, 18, 18, 18,
+	18, 18, 18, 18,
+}
+var protoR2 = [...]int{
+
+	0, 1, 1, 2, 0, 2, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 4, 3, 4, 4, 3,
+	1, 1, 5, 1, 3, 4, 1, 2, 1, 4,
+	1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
+	1, 2, 2, 2, 2, 1, 2, 3, 1, 2,
+	0, 1, 2, 2, 3, 4, 5, 3, 2, 5,
+	4, 1, 3, 1, 3, 3, 3, 5, 5, 1,
+	1, 6, 6, 6, 5, 9, 9, 9, 8, 3,
+	1, 3, 8, 8, 8, 5, 2, 1, 0, 1,
+	1, 1, 5, 8, 10, 13, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 3, 6,
+	3, 1, 1, 3, 3, 3, 1, 1, 1, 3,
+	3, 3, 3, 3, 3, 1, 3, 1, 3, 3,
+	1, 5, 2, 1, 0, 1, 1, 1, 1, 4,
+	7, 4, 7, 5, 2, 1, 0, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 5, 2,
+	1, 0, 1, 1, 1, 5, 2, 1, 0, 1,
+	1, 1, 10, 12, 2, 1, 2, 1, 0, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+	1, 1, 1, 1,
+}
+var protoChk = [...]int{
+
+	-1000, -1, -2, -4, 10, -3, -5, -6, -7, -35,
+	-49, -53, -57, 54, 11, 14, 15, 46, 45, 47,
+	48, -4, -3, 53, -56, 12, 13, 4, -19, -18,
+	8, 7, 10, 11, 12, 13, 14, 15, 16, 17,
+	18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+	28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+	38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
+	48, 49, 50, 51, -12, -19, 67, -18, -18, -20,
+	-19, 9, -18, -56, 54, 4, -56, -56, 54, 53,
+	-20, 56, 56, 56, 56, 54, 54, 54, -15, -16,
+	-17, -56, -24, -23, -25, -18, 56, 5, 65, 66,
+	6, 68, -37, -36, -30, -49, -35, -53, -48, -33,
+	-7, -32, -34, -41, 54, 22, 21, 20, -20, 45,
+	46, 47, 41, 15, 39, 40, 44, -43, -51, -50,
+	-7, -52, -42, 54, -18, 44, -43, -55, -54, -30,
+	-33, 54, -59, -58, -7, -60, 54, 49, 54, -27,
+	-28, -29, -22, -18, 69, 5, 6, 18, 5, 6,
+	18, -13, -14, 9, 61, 57, -36, -20, 38, -20,
+	38, -20, 38, -18, -45, -44, 5, -18, 64, -45,
+	-40, -56, 57, -50, 53, -47, -46, 5, -23, 66,
+	57, -54, 57, -58, -18, 57, -28, 62, 54, 55,
+	-17, 64, -19, -13, 67, -18, -18, -18, -18, -18,
+	-18, 53, 54, 69, 62, 42, 56, -21, 25, 26,
+	27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
+	54, 54, 62, 5, -23, 62, 54, 42, 42, 67,
+	-16, 69, -17, 64, -27, 70, -20, 53, 53, 53,
+	53, 53, 53, 5, -9, -8, -12, -44, 5, 43,
+	-39, -38, -7, -31, 54, -20, 62, -56, 54, 69,
+	54, 69, -46, 5, 43, -23, 5, 43, -61, 50,
+	-20, 70, -26, -15, 64, -27, 63, 68, 5, 5,
+	5, 5, 5, 5, 54, 69, 62, 70, 53, 57,
+	-38, -18, -20, -9, -9, 68, -20, 70, 62, 54,
+	-27, 63, 54, 69, 56, 54, 69, 56, 54, 69,
+	56, -9, -8, 54, -15, 53, 63, 70, 70, 51,
+	-15, 64, -15, 64, 63, -9, -37, -9, -37, -9,
+	-37, 70, 5, -18, 54, 54, 67, -27, -27, 70,
+	57, 70, 57, 70, 57, 54, 54, 69, 53, -61,
+	63, 63, 54, 54, 54, -9, 5, 68, 70, 54,
+	69, 54, 56, 54, -9, -11, -10, -7, 54, 70,
+	57, -10, 54,
+}
+var protoDef = [...]int{
+
+	4, -2, 1, 2, 0, 6, 7, 8, 9, 10,
+	11, 12, 13, 14, 0, 0, 0, 0, 0, 0,
+	0, 3, 5, 0, 0, 0, 0, 45, 0, 20,
+	21, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+	190, 191, 192, 193, 194, 195, 196, 197, 198, 199,
+	200, 201, 202, 203, 204, 205, 206, 207, 208, 209,
+	210, 211, 212, 213, 214, 215, 216, 217, 218, 219,
+	220, 221, 222, 223, 0, 23, 0, 0, 0, 0,
+	69, 70, 0, 0, 16, 46, 0, 0, 19, 0,
+	0, 146, 134, 161, 168, 15, 17, 18, 0, 30,
+	31, 32, 33, 34, 35, 36, 50, 37, 0, 0,
+	40, 24, 0, 145, 147, 148, 149, 150, 151, 152,
+	153, 154, 155, 156, 157, 0, 0, 0, 0, 0,
+	0, 0, 213, 187, 0, 212, 216, 125, 0, 133,
+	135, 136, 137, 138, 0, 216, 127, 0, 160, 162,
+	163, 164, 0, 167, 169, 170, 171, 0, 22, 0,
+	48, 51, 0, 61, 0, 38, 42, 43, 39, 41,
+	44, 25, 26, 28, 0, 143, 144, 0, 0, 0,
+	0, 0, 0, 0, 0, 111, 112, 0, 0, 0,
+	0, 130, 131, 132, 0, 0, 116, 117, 118, 0,
+	158, 159, 165, 166, 0, 47, 49, 52, 53, 0,
+	58, 50, 0, 27, 0, 0, 0, 0, 0, 0,
+	0, 0, 108, 0, 0, 0, 88, 0, 96, 97,
+	98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
+	124, 128, 0, 0, 0, 0, 126, 0, 0, 0,
+	54, 0, 57, 50, 0, 62, 0, 0, 0, 0,
+	0, 0, 0, 0, 0, 80, 0, 110, 113, 114,
+	0, 87, 89, 90, 91, 0, 0, 129, 139, 0,
+	141, 0, 115, 119, 122, 120, 121, 123, 0, 222,
+	175, 55, 0, 63, 50, 0, 60, 29, 0, 0,
+	0, 0, 0, 0, 74, 0, 0, 0, 0, 85,
+	86, 0, 0, 0, 0, 0, 174, 56, 0, 0,
+	0, 59, 71, 0, 146, 72, 0, 146, 73, 0,
+	146, 0, 79, 109, 81, 0, 0, 0, 0, 0,
+	64, 50, 65, 50, 66, 0, 0, 0, 0, 0,
+	0, 0, 0, 0, 140, 142, 0, 0, 0, 0,
+	82, 0, 83, 0, 84, 78, 92, 0, 0, 0,
+	67, 68, 75, 76, 77, 0, 0, 0, 0, 94,
+	0, 172, 178, 93, 0, 0, 177, 179, 180, 0,
+	173, 176, 95,
+}
+var protoTok1 = [...]int{
+
+	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 78, 3, 76, 75, 74, 72, 3,
+	67, 68, 71, 65, 62, 66, 61, 59, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 55, 54,
+	64, 53, 63, 60, 77, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 69, 58, 70, 73, 3, 80, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+	3, 3, 3, 56, 3, 57, 79,
+}
+var protoTok2 = [...]int{
+
+	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
+	42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
+	52,
+}
+var protoTok3 = [...]int{
+	0,
+}
+
+var protoErrorMessages = [...]struct {
+	state int
+	token int
+	msg   string
+}{}
+
+//line yaccpar:1
+
+/*	parser for yacc output	*/
+
+var (
+	protoDebug        = 0
+	protoErrorVerbose = false
+)
+
+type protoLexer interface {
+	Lex(lval *protoSymType) int
+	Error(s string)
+}
+
+type protoParser interface {
+	Parse(protoLexer) int
+	Lookahead() int
+}
+
+type protoParserImpl struct {
+	lval  protoSymType
+	stack [protoInitialStackSize]protoSymType
+	char  int
+}
+
+func (p *protoParserImpl) Lookahead() int {
+	return p.char
+}
+
+func protoNewParser() protoParser {
+	return &protoParserImpl{}
+}
+
+const protoFlag = -1000
+
+func protoTokname(c int) string {
+	if c >= 1 && c-1 < len(protoToknames) {
+		if protoToknames[c-1] != "" {
+			return protoToknames[c-1]
+		}
+	}
+	return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func protoStatname(s int) string {
+	if s >= 0 && s < len(protoStatenames) {
+		if protoStatenames[s] != "" {
+			return protoStatenames[s]
+		}
+	}
+	return __yyfmt__.Sprintf("state-%v", s)
+}
+
+func protoErrorMessage(state, lookAhead int) string {
+	const TOKSTART = 4
+
+	if !protoErrorVerbose {
+		return "syntax error"
+	}
+
+	for _, e := range protoErrorMessages {
+		if e.state == state && e.token == lookAhead {
+			return "syntax error: " + e.msg
+		}
+	}
+
+	res := "syntax error: unexpected " + protoTokname(lookAhead)
+
+	// To match Bison, suggest at most four expected tokens.
+	expected := make([]int, 0, 4)
+
+	// Look for shiftable tokens.
+	base := protoPact[state]
+	for tok := TOKSTART; tok-1 < len(protoToknames); tok++ {
+		if n := base + tok; n >= 0 && n < protoLast && protoChk[protoAct[n]] == tok {
+			if len(expected) == cap(expected) {
+				return res
+			}
+			expected = append(expected, tok)
+		}
+	}
+
+	if protoDef[state] == -2 {
+		i := 0
+		for protoExca[i] != -1 || protoExca[i+1] != state {
+			i += 2
+		}
+
+		// Look for tokens that we accept or reduce.
+		for i += 2; protoExca[i] >= 0; i += 2 {
+			tok := protoExca[i]
+			if tok < TOKSTART || protoExca[i+1] == 0 {
+				continue
+			}
+			if len(expected) == cap(expected) {
+				return res
+			}
+			expected = append(expected, tok)
+		}
+
+		// If the default action is to accept or reduce, give up.
+		if protoExca[i+1] != 0 {
+			return res
+		}
+	}
+
+	for i, tok := range expected {
+		if i == 0 {
+			res += ", expecting "
+		} else {
+			res += " or "
+		}
+		res += protoTokname(tok)
+	}
+	return res
+}
+
+func protolex1(lex protoLexer, lval *protoSymType) (char, token int) {
+	token = 0
+	char = lex.Lex(lval)
+	if char <= 0 {
+		token = protoTok1[0]
+		goto out
+	}
+	if char < len(protoTok1) {
+		token = protoTok1[char]
+		goto out
+	}
+	if char >= protoPrivate {
+		if char < protoPrivate+len(protoTok2) {
+			token = protoTok2[char-protoPrivate]
+			goto out
+		}
+	}
+	for i := 0; i < len(protoTok3); i += 2 {
+		token = protoTok3[i+0]
+		if token == char {
+			token = protoTok3[i+1]
+			goto out
+		}
+	}
+
+out:
+	if token == 0 {
+		token = protoTok2[1] /* unknown char */
+	}
+	if protoDebug >= 3 {
+		__yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char))
+	}
+	return char, token
+}
+
+func protoParse(protolex protoLexer) int {
+	return protoNewParser().Parse(protolex)
+}
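+
+// Usage sketch (illustrative comment, not part of the generated output):
+// callers implement the protoLexer interface and hand it to protoParse, which
+// returns 0 when the input is accepted and 1 when error recovery fails; syntax
+// errors are reported through the lexer's Error method. In this package the
+// concrete lexer is *protoLex, and a successful parse stores the resulting
+// file node in its res field (see the type assertions in the grammar actions
+// below).
+//
+//	var lex protoLexer = newProtoLex(input) // hypothetical constructor name
+//	if rc := protoParse(lex); rc != 0 {
+//		// parse failed; details were passed to lex.Error
+//	}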
+
+func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int {
+	var proton int
+	var protoVAL protoSymType
+	var protoDollar []protoSymType
+	_ = protoDollar // silence set and not used
+	protoS := protorcvr.stack[:]
+
+	Nerrs := 0   /* number of errors */
+	Errflag := 0 /* error recovery flag */
+	protostate := 0
+	protorcvr.char = -1
+	prototoken := -1 // protorcvr.char translated into internal numbering
+	defer func() {
+		// Make sure we report no lookahead when not parsing.
+		protostate = -1
+		protorcvr.char = -1
+		prototoken = -1
+	}()
+	protop := -1
+	goto protostack
+
+ret0:
+	return 0
+
+ret1:
+	return 1
+
+protostack:
+	/* put a state and value onto the stack */
+	if protoDebug >= 4 {
+		__yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate))
+	}
+
+	protop++
+	if protop >= len(protoS) {
+		nyys := make([]protoSymType, len(protoS)*2)
+		copy(nyys, protoS)
+		protoS = nyys
+	}
+	protoS[protop] = protoVAL
+	protoS[protop].yys = protostate
+
+protonewstate:
+	proton = protoPact[protostate]
+	if proton <= protoFlag {
+		goto protodefault /* simple state */
+	}
+	if protorcvr.char < 0 {
+		protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+	}
+	proton += prototoken
+	if proton < 0 || proton >= protoLast {
+		goto protodefault
+	}
+	proton = protoAct[proton]
+	if protoChk[proton] == prototoken { /* valid shift */
+		protorcvr.char = -1
+		prototoken = -1
+		protoVAL = protorcvr.lval
+		protostate = proton
+		if Errflag > 0 {
+			Errflag--
+		}
+		goto protostack
+	}
+
+protodefault:
+	/* default state action */
+	proton = protoDef[protostate]
+	if proton == -2 {
+		if protorcvr.char < 0 {
+			protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
+		}
+
+		/* look through exception table */
+		xi := 0
+		for {
+			if protoExca[xi+0] == -1 && protoExca[xi+1] == protostate {
+				break
+			}
+			xi += 2
+		}
+		for xi += 2; ; xi += 2 {
+			proton = protoExca[xi+0]
+			if proton < 0 || proton == prototoken {
+				break
+			}
+		}
+		proton = protoExca[xi+1]
+		if proton < 0 {
+			goto ret0
+		}
+	}
+	if proton == 0 {
+		/* error ... attempt to resume parsing */
+		switch Errflag {
+		case 0: /* brand new error */
+			protolex.Error(protoErrorMessage(protostate, prototoken))
+			Nerrs++
+			if protoDebug >= 1 {
+				__yyfmt__.Printf("%s", protoStatname(protostate))
+				__yyfmt__.Printf(" saw %s\n", protoTokname(prototoken))
+			}
+			fallthrough
+
+		case 1, 2: /* incompletely recovered error ... try again */
+			Errflag = 3
+
+			/* find a state where "error" is a legal shift action */
+			for protop >= 0 {
+				proton = protoPact[protoS[protop].yys] + protoErrCode
+				if proton >= 0 && proton < protoLast {
+					protostate = protoAct[proton] /* simulate a shift of "error" */
+					if protoChk[protostate] == protoErrCode {
+						goto protostack
+					}
+				}
+
+				/* the current p has no shift on "error", pop stack */
+				if protoDebug >= 2 {
+					__yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys)
+				}
+				protop--
+			}
+			/* there is no state on the stack with an error shift ... abort */
+			goto ret1
+
+		case 3: /* no shift yet; clobber input char */
+			if protoDebug >= 2 {
+				__yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken))
+			}
+			if prototoken == protoEofCode {
+				goto ret1
+			}
+			protorcvr.char = -1
+			prototoken = -1
+			goto protonewstate /* try again in the same state */
+		}
+	}
+
+	/* reduction by production proton */
+	if protoDebug >= 2 {
+		__yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate))
+	}
+
+	protont := proton
+	protopt := protop
+	_ = protopt // guard against "declared and not used"
+
+	protop -= protoR2[proton]
+	// protop is now the index of $0. Perform the default action. Iff the
+	// reduced production is ε, $1 is possibly out of range.
+	if protop+1 >= len(protoS) {
+		nyys := make([]protoSymType, len(protoS)*2)
+		copy(nyys, protoS)
+		protoS = nyys
+	}
+	protoVAL = protoS[protop+1]
+
+	/* consult goto table to find next state */
+	proton = protoR1[proton]
+	protog := protoPgo[proton]
+	protoj := protog + protoS[protop].yys + 1
+
+	if protoj >= protoLast {
+		protostate = protoAct[protog]
+	} else {
+		protostate = protoAct[protoj]
+		if protoChk[protostate] != -proton {
+			protostate = protoAct[protog]
+		}
+	}
+	// dummy call; replaced with literal code
+	switch protont {
+
+	case 1:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:114
+		{
+			protoVAL.file = &fileNode{syntax: protoDollar[1].syn}
+			protoVAL.file.setRange(protoDollar[1].syn, protoDollar[1].syn)
+			protolex.(*protoLex).res = protoVAL.file
+		}
+	case 2:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:119
+		{
+			protoVAL.file = &fileNode{decls: protoDollar[1].fileDecls}
+			if len(protoDollar[1].fileDecls) > 0 {
+				protoVAL.file.setRange(protoDollar[1].fileDecls[0], protoDollar[1].fileDecls[len(protoDollar[1].fileDecls)-1])
+			}
+			protolex.(*protoLex).res = protoVAL.file
+		}
+	case 3:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:126
+		{
+			protoVAL.file = &fileNode{syntax: protoDollar[1].syn, decls: protoDollar[2].fileDecls}
+			var end node
+			if len(protoDollar[2].fileDecls) > 0 {
+				end = protoDollar[2].fileDecls[len(protoDollar[2].fileDecls)-1]
+			} else {
+				end = protoDollar[1].syn
+			}
+			protoVAL.file.setRange(protoDollar[1].syn, end)
+			protolex.(*protoLex).res = protoVAL.file
+		}
+	case 4:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:137
+		{
+		}
+	case 5:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:140
+		{
+			protoVAL.fileDecls = append(protoDollar[1].fileDecls, protoDollar[2].fileDecls...)
+		}
+	case 7:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:145
+		{
+			protoVAL.fileDecls = []*fileElement{{imp: protoDollar[1].imprt}}
+		}
+	case 8:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:148
+		{
+			protoVAL.fileDecls = []*fileElement{{pkg: protoDollar[1].pkg}}
+		}
+	case 9:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:151
+		{
+			protoVAL.fileDecls = []*fileElement{{option: protoDollar[1].opts[0]}}
+		}
+	case 10:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:154
+		{
+			protoVAL.fileDecls = []*fileElement{{message: protoDollar[1].msg}}
+		}
+	case 11:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:157
+		{
+			protoVAL.fileDecls = []*fileElement{{enum: protoDollar[1].en}}
+		}
+	case 12:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:160
+		{
+			protoVAL.fileDecls = []*fileElement{{extend: protoDollar[1].extend}}
+		}
+	case 13:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:163
+		{
+			protoVAL.fileDecls = []*fileElement{{service: protoDollar[1].svc}}
+		}
+	case 14:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:166
+		{
+			protoVAL.fileDecls = []*fileElement{{empty: protoDollar[1].b}}
+		}
+	case 15:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:170
+		{
+			if protoDollar[3].str.val != "proto2" && protoDollar[3].str.val != "proto3" {
+				lexError(protolex, protoDollar[3].str.start(), "syntax value must be 'proto2' or 'proto3'")
+			}
+			protoVAL.syn = &syntaxNode{syntax: protoDollar[3].str}
+			protoVAL.syn.setRange(protoDollar[1].id, protoDollar[4].b)
+		}
+	case 16:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:178
+		{
+			protoVAL.imprt = &importNode{name: protoDollar[2].str}
+			protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 17:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:182
+		{
+			protoVAL.imprt = &importNode{name: protoDollar[3].str, weak: true}
+			protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+		}
+	case 18:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:186
+		{
+			protoVAL.imprt = &importNode{name: protoDollar[3].str, public: true}
+			protoVAL.imprt.setRange(protoDollar[1].id, protoDollar[4].b)
+		}
+	case 19:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:191
+		{
+			protoVAL.pkg = &packageNode{name: protoDollar[2].id}
+			protoVAL.pkg.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 22:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:199
+		{
+			n := &optionNameNode{parts: protoDollar[2].optNm}
+			n.setRange(protoDollar[2].optNm[0], protoDollar[2].optNm[len(protoDollar[2].optNm)-1])
+			o := &optionNode{name: n, val: protoDollar[4].v}
+			o.setRange(protoDollar[1].id, protoDollar[5].b)
+			protoVAL.opts = []*optionNode{o}
+		}
+	case 23:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:207
+		{
+			protoVAL.optNm = toNameParts(protoDollar[1].id, 0)
+		}
+	case 24:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:210
+		{
+			p := &optionNamePartNode{text: protoDollar[2].id, isExtension: true}
+			p.setRange(protoDollar[1].b, protoDollar[3].b)
+			protoVAL.optNm = []*optionNamePartNode{p}
+		}
+	case 25:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:215
+		{
+			p := &optionNamePartNode{text: protoDollar[2].id, isExtension: true}
+			p.setRange(protoDollar[1].b, protoDollar[3].b)
+			ps := make([]*optionNamePartNode, 1, len(protoDollar[4].optNm)+1)
+			ps[0] = p
+			protoVAL.optNm = append(ps, protoDollar[4].optNm...)
+		}
+	case 27:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:224
+		{
+			protoVAL.optNm = append(protoDollar[1].optNm, protoDollar[2].optNm...)
+		}
+	case 28:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:228
+		{
+			protoVAL.optNm = toNameParts(protoDollar[1].id, 1 /* exclude leading dot */)
+		}
+	case 29:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:231
+		{
+			p := &optionNamePartNode{text: protoDollar[3].id, isExtension: true}
+			p.setRange(protoDollar[2].b, protoDollar[4].b)
+			protoVAL.optNm = []*optionNamePartNode{p}
+		}
+	case 32:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:240
+		{
+			protoVAL.v = protoDollar[1].str
+		}
+	case 33:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:243
+		{
+			protoVAL.v = protoDollar[1].ui
+		}
+	case 34:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:246
+		{
+			protoVAL.v = protoDollar[1].i
+		}
+	case 35:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:249
+		{
+			protoVAL.v = protoDollar[1].f
+		}
+	case 36:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:252
+		{
+			if protoDollar[1].id.val == "true" {
+				protoVAL.v = &boolLiteralNode{basicNode: protoDollar[1].id.basicNode, val: true}
+			} else if protoDollar[1].id.val == "false" {
+				protoVAL.v = &boolLiteralNode{basicNode: protoDollar[1].id.basicNode, val: false}
+			} else if protoDollar[1].id.val == "inf" {
+				f := &floatLiteralNode{val: math.Inf(1)}
+				f.setRange(protoDollar[1].id, protoDollar[1].id)
+				protoVAL.v = f
+			} else if protoDollar[1].id.val == "nan" {
+				f := &floatLiteralNode{val: math.NaN()}
+				f.setRange(protoDollar[1].id, protoDollar[1].id)
+				protoVAL.v = f
+			} else {
+				protoVAL.v = protoDollar[1].id
+			}
+		}
+	case 38:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:271
+		{
+			protoVAL.ui = protoDollar[2].ui
+		}
+	case 39:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:275
+		{
+			if protoDollar[2].ui.val > math.MaxInt64+1 {
+				lexError(protolex, protoDollar[2].ui.start(), fmt.Sprintf("numeric constant %d would underflow (allowed range is %d to %d)", protoDollar[2].ui.val, int64(math.MinInt64), int64(math.MaxInt64)))
+			}
+			protoVAL.i = &negativeIntLiteralNode{val: -int64(protoDollar[2].ui.val)}
+			protoVAL.i.setRange(protoDollar[1].b, protoDollar[2].ui)
+		}
+	case 41:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:284
+		{
+			protoVAL.f = &floatLiteralNode{val: -protoDollar[2].f.val}
+			protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].f)
+		}
+	case 42:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:288
+		{
+			protoVAL.f = &floatLiteralNode{val: protoDollar[2].f.val}
+			protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].f)
+		}
+	case 43:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:292
+		{
+			protoVAL.f = &floatLiteralNode{val: math.Inf(1)}
+			protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].id)
+		}
+	case 44:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:296
+		{
+			protoVAL.f = &floatLiteralNode{val: math.Inf(-1)}
+			protoVAL.f.setRange(protoDollar[1].b, protoDollar[2].id)
+		}
+	case 46:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:302
+		{
+			protoVAL.str = &stringLiteralNode{val: protoDollar[1].str.val + protoDollar[2].str.val}
+			protoVAL.str.setRange(protoDollar[1].str, protoDollar[2].str)
+		}
+	case 47:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:307
+		{
+			a := &aggregateLiteralNode{elements: protoDollar[2].agg}
+			a.setRange(protoDollar[1].b, protoDollar[3].b)
+			protoVAL.v = a
+		}
+	case 49:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:314
+		{
+			protoVAL.agg = append(protoDollar[1].agg, protoDollar[2].agg...)
+		}
+	case 50:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:317
+		{
+			protoVAL.agg = nil
+		}
+	case 52:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:322
+		{
+			protoVAL.agg = protoDollar[1].agg
+		}
+	case 53:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:325
+		{
+			protoVAL.agg = protoDollar[1].agg
+		}
+	case 54:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:329
+		{
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+			a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 55:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:334
+		{
+			s := &sliceLiteralNode{}
+			s.setRange(protoDollar[3].b, protoDollar[4].b)
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+			a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 56:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:341
+		{
+			s := &sliceLiteralNode{elements: protoDollar[4].sl}
+			s.setRange(protoDollar[3].b, protoDollar[5].b)
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+			a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 57:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:348
+		{
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[3].v}
+			a.setRange(protoDollar[1].aggName, protoDollar[3].v)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 58:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:353
+		{
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: protoDollar[2].v}
+			a.setRange(protoDollar[1].aggName, protoDollar[2].v)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 59:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:358
+		{
+			s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+			s.setRange(protoDollar[3].b, protoDollar[5].b)
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+			a.setRange(protoDollar[1].aggName, protoDollar[5].b)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 60:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:365
+		{
+			s := &aggregateLiteralNode{elements: protoDollar[3].agg}
+			s.setRange(protoDollar[2].b, protoDollar[4].b)
+			a := &aggregateEntryNode{name: protoDollar[1].aggName, val: s}
+			a.setRange(protoDollar[1].aggName, protoDollar[4].b)
+			protoVAL.agg = []*aggregateEntryNode{a}
+		}
+	case 61:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:373
+		{
+			protoVAL.aggName = &aggregateNameNode{name: protoDollar[1].id}
+			protoVAL.aggName.setRange(protoDollar[1].id, protoDollar[1].id)
+		}
+	case 62:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:377
+		{
+			protoVAL.aggName = &aggregateNameNode{name: protoDollar[2].id, isExtension: true}
+			protoVAL.aggName.setRange(protoDollar[1].b, protoDollar[3].b)
+		}
+	case 63:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:382
+		{
+			protoVAL.sl = []valueNode{protoDollar[1].v}
+		}
+	case 64:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:385
+		{
+			protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+		}
+	case 65:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:388
+		{
+			protoVAL.sl = append(protoDollar[1].sl, protoDollar[3].v)
+		}
+	case 66:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:391
+		{
+			s := &aggregateLiteralNode{elements: protoDollar[2].agg}
+			s.setRange(protoDollar[1].b, protoDollar[3].b)
+			protoVAL.sl = []valueNode{s}
+		}
+	case 67:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:396
+		{
+			s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+			s.setRange(protoDollar[3].b, protoDollar[5].b)
+			protoVAL.sl = append(protoDollar[1].sl, s)
+		}
+	case 68:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:401
+		{
+			s := &aggregateLiteralNode{elements: protoDollar[4].agg}
+			s.setRange(protoDollar[3].b, protoDollar[5].b)
+			protoVAL.sl = append(protoDollar[1].sl, s)
+		}
+	case 71:
+		protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:410
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+		}
+	case 72:
+		protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:416
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+		}
+	case 73:
+		protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:422
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[6].b)
+		}
+	case 74:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:428
+		{
+			checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+			protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 75:
+		protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:433
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+		}
+	case 76:
+		protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:439
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+		}
+	case 77:
+		protoDollar = protoS[protopt-9 : protopt+1]
+//line proto.y:445
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+			protoVAL.fld = &fieldNode{label: lbl, fldType: protoDollar[2].id, name: protoDollar[3].id, tag: protoDollar[5].ui, options: protoDollar[7].opts}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[9].b)
+		}
+	case 78:
+		protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:451
+		{
+			checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+			protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui, options: protoDollar[6].opts}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[8].b)
+		}
+	case 79:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:457
+		{
+			protoVAL.opts = append(protoDollar[1].opts, protoDollar[3].opts...)
+		}
+	case 81:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:462
+		{
+			n := &optionNameNode{parts: protoDollar[1].optNm}
+			n.setRange(protoDollar[1].optNm[0], protoDollar[1].optNm[len(protoDollar[1].optNm)-1])
+			o := &optionNode{name: n, val: protoDollar[3].v}
+			o.setRange(protoDollar[1].optNm[0], protoDollar[3].v)
+			protoVAL.opts = []*optionNode{o}
+		}
+	case 82:
+		protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:470
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+				lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+			}
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, required: true}
+			protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+			protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+		}
+	case 83:
+		protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:479
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+				lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+			}
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode}
+			protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+			protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+		}
+	case 84:
+		protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:488
+		{
+			checkTag(protolex, protoDollar[5].ui.start(), protoDollar[5].ui.val)
+			if !unicode.IsUpper(rune(protoDollar[3].id.val[0])) {
+				lexError(protolex, protoDollar[3].id.start(), fmt.Sprintf("group %s should have a name that starts with a capital letter", protoDollar[3].id.val))
+			}
+			lbl := &labelNode{basicNode: protoDollar[1].id.basicNode, repeated: true}
+			protoVAL.grp = &groupNode{groupKeyword: protoDollar[2].id, label: lbl, name: protoDollar[3].id, tag: protoDollar[5].ui, decls: protoDollar[7].msgDecls}
+			protoVAL.grp.setRange(protoDollar[1].id, protoDollar[8].b)
+		}
+	case 85:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:498
+		{
+			c := 0
+			for _, el := range protoDollar[4].ooDecls {
+				if el.field != nil {
+					c++
+				}
+			}
+			if c == 0 {
+				lexError(protolex, protoDollar[1].id.start(), "oneof must contain at least one field")
+			}
+			protoVAL.oo = &oneOfNode{name: protoDollar[2].id, decls: protoDollar[4].ooDecls}
+			protoVAL.oo.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 86:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:512
+		{
+			protoVAL.ooDecls = append(protoDollar[1].ooDecls, protoDollar[2].ooDecls...)
+		}
+	case 88:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:516
+		{
+			protoVAL.ooDecls = nil
+		}
+	case 89:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:520
+		{
+			protoVAL.ooDecls = []*oneOfElement{{option: protoDollar[1].opts[0]}}
+		}
+	case 90:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:523
+		{
+			protoVAL.ooDecls = []*oneOfElement{{field: protoDollar[1].fld}}
+		}
+	case 91:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:526
+		{
+			protoVAL.ooDecls = []*oneOfElement{{empty: protoDollar[1].b}}
+		}
+	case 92:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:530
+		{
+			checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+			protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 93:
+		protoDollar = protoS[protopt-8 : protopt+1]
+//line proto.y:535
+		{
+			checkTag(protolex, protoDollar[4].ui.start(), protoDollar[4].ui.val)
+			protoVAL.fld = &fieldNode{fldType: protoDollar[1].id, name: protoDollar[2].id, tag: protoDollar[4].ui, options: protoDollar[6].opts}
+			protoVAL.fld.setRange(protoDollar[1].id, protoDollar[8].b)
+		}
+	case 94:
+		protoDollar = protoS[protopt-10 : protopt+1]
+//line proto.y:541
+		{
+			checkTag(protolex, protoDollar[9].ui.start(), protoDollar[9].ui.val)
+			protoVAL.mapFld = &mapFieldNode{mapKeyword: protoDollar[1].id, keyType: protoDollar[3].id, valueType: protoDollar[5].id, name: protoDollar[7].id, tag: protoDollar[9].ui}
+			protoVAL.mapFld.setRange(protoDollar[1].id, protoDollar[10].b)
+		}
+	case 95:
+		protoDollar = protoS[protopt-13 : protopt+1]
+//line proto.y:546
+		{
+			checkTag(protolex, protoDollar[9].ui.start(), protoDollar[9].ui.val)
+			protoVAL.mapFld = &mapFieldNode{mapKeyword: protoDollar[1].id, keyType: protoDollar[3].id, valueType: protoDollar[5].id, name: protoDollar[7].id, tag: protoDollar[9].ui, options: protoDollar[11].opts}
+			protoVAL.mapFld.setRange(protoDollar[1].id, protoDollar[13].b)
+		}
+	case 108:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:565
+		{
+			protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs}
+			protoVAL.ext.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 109:
+		protoDollar = protoS[protopt-6 : protopt+1]
+//line proto.y:569
+		{
+			protoVAL.ext = &extensionRangeNode{ranges: protoDollar[2].rngs, options: protoDollar[4].opts}
+			protoVAL.ext.setRange(protoDollar[1].id, protoDollar[6].b)
+		}
+	case 110:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:574
+		{
+			protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+		}
+	case 112:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:579
+		{
+			if protoDollar[1].ui.val > internal.MaxTag {
+				lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range includes out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+			}
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[1].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[1].ui.val)}
+			r.setRange(protoDollar[1].ui, protoDollar[1].ui)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 113:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:587
+		{
+			if protoDollar[1].ui.val > internal.MaxTag {
+				lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+			}
+			if protoDollar[3].ui.val > internal.MaxTag {
+				lexError(protolex, protoDollar[3].ui.start(), fmt.Sprintf("range end is out-of-range tag: %d (should be between 0 and %d)", protoDollar[3].ui.val, internal.MaxTag))
+			}
+			if protoDollar[1].ui.val > protoDollar[3].ui.val {
+				lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].ui.val, protoDollar[3].ui.val))
+			}
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[3].ui.val)}
+			r.setRange(protoDollar[1].ui, protoDollar[3].ui)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 114:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:601
+		{
+			if protoDollar[1].ui.val > internal.MaxTag {
+				lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range start is out-of-range tag: %d (should be between 0 and %d)", protoDollar[1].ui.val, internal.MaxTag))
+			}
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].id, st: int32(protoDollar[1].ui.val), en: internal.MaxTag}
+			r.setRange(protoDollar[1].ui, protoDollar[3].id)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 115:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:610
+		{
+			protoVAL.rngs = append(protoDollar[1].rngs, protoDollar[3].rngs...)
+		}
+	case 117:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:615
+		{
+			checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[1].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[1].ui.val)}
+			r.setRange(protoDollar[1].ui, protoDollar[1].ui)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 118:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:621
+		{
+			checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+			r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[1].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[1].i.val)}
+			r.setRange(protoDollar[1].i, protoDollar[1].i)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 119:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:627
+		{
+			checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+			checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+			if protoDollar[1].ui.val > protoDollar[3].ui.val {
+				lexError(protolex, protoDollar[1].ui.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].ui.val, protoDollar[3].ui.val))
+			}
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].ui, st: int32(protoDollar[1].ui.val), en: int32(protoDollar[3].ui.val)}
+			r.setRange(protoDollar[1].ui, protoDollar[3].ui)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 120:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:637
+		{
+			checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+			checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+			if protoDollar[1].i.val > protoDollar[3].i.val {
+				lexError(protolex, protoDollar[1].i.start(), fmt.Sprintf("range, %d to %d, is invalid: start must be <= end", protoDollar[1].i.val, protoDollar[3].i.val))
+			}
+			r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].i, st: int32(protoDollar[1].i.val), en: int32(protoDollar[3].i.val)}
+			r.setRange(protoDollar[1].i, protoDollar[3].i)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 121:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:647
+		{
+			checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+			checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+			r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].ui, st: int32(protoDollar[1].i.val), en: int32(protoDollar[3].ui.val)}
+			r.setRange(protoDollar[1].i, protoDollar[3].ui)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 122:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:654
+		{
+			checkUint64InInt32Range(protolex, protoDollar[1].ui.start(), protoDollar[1].ui.val)
+			r := &rangeNode{stNode: protoDollar[1].ui, enNode: protoDollar[3].id, st: int32(protoDollar[1].ui.val), en: math.MaxInt32}
+			r.setRange(protoDollar[1].ui, protoDollar[3].id)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 123:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:660
+		{
+			checkInt64InInt32Range(protolex, protoDollar[1].i.start(), protoDollar[1].i.val)
+			r := &rangeNode{stNode: protoDollar[1].i, enNode: protoDollar[3].id, st: int32(protoDollar[1].i.val), en: math.MaxInt32}
+			r.setRange(protoDollar[1].i, protoDollar[3].id)
+			protoVAL.rngs = []*rangeNode{r}
+		}
+	case 124:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:667
+		{
+			protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+			protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 126:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:673
+		{
+			protoVAL.resvd = &reservedNode{ranges: protoDollar[2].rngs}
+			protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 128:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:679
+		{
+			rsvd := map[string]struct{}{}
+			for _, n := range protoDollar[2].names {
+				if _, ok := rsvd[n.val]; ok {
+					lexError(protolex, n.start(), fmt.Sprintf("name %q is reserved multiple times", n.val))
+					break
+				}
+				rsvd[n.val] = struct{}{}
+			}
+			protoVAL.resvd = &reservedNode{names: protoDollar[2].names}
+			protoVAL.resvd.setRange(protoDollar[1].id, protoDollar[3].b)
+		}
+	case 129:
+		protoDollar = protoS[protopt-3 : protopt+1]
+//line proto.y:692
+		{
+			protoVAL.names = append(protoDollar[1].names, protoDollar[3].str)
+		}
+	case 130:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:695
+		{
+			protoVAL.names = []*stringLiteralNode{protoDollar[1].str}
+		}
+	case 131:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:699
+		{
+			c := 0
+			for _, el := range protoDollar[4].enDecls {
+				if el.value != nil {
+					c++
+				}
+			}
+			if c == 0 {
+				lexError(protolex, protoDollar[1].id.start(), "enums must define at least one value")
+			}
+			protoVAL.en = &enumNode{name: protoDollar[2].id, decls: protoDollar[4].enDecls}
+			protoVAL.en.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 132:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:713
+		{
+			protoVAL.enDecls = append(protoDollar[1].enDecls, protoDollar[2].enDecls...)
+		}
+	case 134:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:717
+		{
+			protoVAL.enDecls = nil
+		}
+	case 135:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:721
+		{
+			protoVAL.enDecls = []*enumElement{{option: protoDollar[1].opts[0]}}
+		}
+	case 136:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:724
+		{
+			protoVAL.enDecls = []*enumElement{{value: protoDollar[1].env}}
+		}
+	case 137:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:727
+		{
+			protoVAL.enDecls = []*enumElement{{reserved: protoDollar[1].resvd}}
+		}
+	case 138:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:730
+		{
+			protoVAL.enDecls = []*enumElement{{empty: protoDollar[1].b}}
+		}
+	case 139:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:734
+		{
+			checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+			protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberP: protoDollar[3].ui}
+			protoVAL.env.setRange(protoDollar[1].id, protoDollar[4].b)
+		}
+	case 140:
+		protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:739
+		{
+			checkUint64InInt32Range(protolex, protoDollar[3].ui.start(), protoDollar[3].ui.val)
+			protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberP: protoDollar[3].ui, options: protoDollar[5].opts}
+			protoVAL.env.setRange(protoDollar[1].id, protoDollar[7].b)
+		}
+	case 141:
+		protoDollar = protoS[protopt-4 : protopt+1]
+//line proto.y:744
+		{
+			checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+			protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberN: protoDollar[3].i}
+			protoVAL.env.setRange(protoDollar[1].id, protoDollar[4].b)
+		}
+	case 142:
+		protoDollar = protoS[protopt-7 : protopt+1]
+//line proto.y:749
+		{
+			checkInt64InInt32Range(protolex, protoDollar[3].i.start(), protoDollar[3].i.val)
+			protoVAL.env = &enumValueNode{name: protoDollar[1].id, numberN: protoDollar[3].i, options: protoDollar[5].opts}
+			protoVAL.env.setRange(protoDollar[1].id, protoDollar[7].b)
+		}
+	case 143:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:755
+		{
+			protoVAL.msg = &messageNode{name: protoDollar[2].id, decls: protoDollar[4].msgDecls}
+			protoVAL.msg.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 144:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:760
+		{
+			protoVAL.msgDecls = append(protoDollar[1].msgDecls, protoDollar[2].msgDecls...)
+		}
+	case 146:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:764
+		{
+			protoVAL.msgDecls = nil
+		}
+	case 147:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:768
+		{
+			protoVAL.msgDecls = []*messageElement{{field: protoDollar[1].fld}}
+		}
+	case 148:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:771
+		{
+			protoVAL.msgDecls = []*messageElement{{enum: protoDollar[1].en}}
+		}
+	case 149:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:774
+		{
+			protoVAL.msgDecls = []*messageElement{{nested: protoDollar[1].msg}}
+		}
+	case 150:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:777
+		{
+			protoVAL.msgDecls = []*messageElement{{extend: protoDollar[1].extend}}
+		}
+	case 151:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:780
+		{
+			protoVAL.msgDecls = []*messageElement{{extensionRange: protoDollar[1].ext}}
+		}
+	case 152:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:783
+		{
+			protoVAL.msgDecls = []*messageElement{{group: protoDollar[1].grp}}
+		}
+	case 153:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:786
+		{
+			protoVAL.msgDecls = []*messageElement{{option: protoDollar[1].opts[0]}}
+		}
+	case 154:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:789
+		{
+			protoVAL.msgDecls = []*messageElement{{oneOf: protoDollar[1].oo}}
+		}
+	case 155:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:792
+		{
+			protoVAL.msgDecls = []*messageElement{{mapField: protoDollar[1].mapFld}}
+		}
+	case 156:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:795
+		{
+			protoVAL.msgDecls = []*messageElement{{reserved: protoDollar[1].resvd}}
+		}
+	case 157:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:798
+		{
+			protoVAL.msgDecls = []*messageElement{{empty: protoDollar[1].b}}
+		}
+	case 158:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:802
+		{
+			c := 0
+			for _, el := range protoDollar[4].extDecls {
+				if el.field != nil || el.group != nil {
+					c++
+				}
+			}
+			if c == 0 {
+				lexError(protolex, protoDollar[1].id.start(), "extend sections must define at least one extension")
+			}
+			protoVAL.extend = &extendNode{extendee: protoDollar[2].id, decls: protoDollar[4].extDecls}
+			protoVAL.extend.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 159:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:816
+		{
+			protoVAL.extDecls = append(protoDollar[1].extDecls, protoDollar[2].extDecls...)
+		}
+	case 161:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:820
+		{
+			protoVAL.extDecls = nil
+		}
+	case 162:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:824
+		{
+			protoVAL.extDecls = []*extendElement{{field: protoDollar[1].fld}}
+		}
+	case 163:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:827
+		{
+			protoVAL.extDecls = []*extendElement{{group: protoDollar[1].grp}}
+		}
+	case 164:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:830
+		{
+			protoVAL.extDecls = []*extendElement{{empty: protoDollar[1].b}}
+		}
+	case 165:
+		protoDollar = protoS[protopt-5 : protopt+1]
+//line proto.y:834
+		{
+			protoVAL.svc = &serviceNode{name: protoDollar[2].id, decls: protoDollar[4].svcDecls}
+			protoVAL.svc.setRange(protoDollar[1].id, protoDollar[5].b)
+		}
+	case 166:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:839
+		{
+			protoVAL.svcDecls = append(protoDollar[1].svcDecls, protoDollar[2].svcDecls...)
+		}
+	case 168:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:843
+		{
+			protoVAL.svcDecls = nil
+		}
+	case 169:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:850
+		{
+			protoVAL.svcDecls = []*serviceElement{{option: protoDollar[1].opts[0]}}
+		}
+	case 170:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:853
+		{
+			protoVAL.svcDecls = []*serviceElement{{rpc: protoDollar[1].mtd}}
+		}
+	case 171:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:856
+		{
+			protoVAL.svcDecls = []*serviceElement{{empty: protoDollar[1].b}}
+		}
+	case 172:
+		protoDollar = protoS[protopt-10 : protopt+1]
+//line proto.y:860
+		{
+			protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType}
+			protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[10].b)
+		}
+	case 173:
+		protoDollar = protoS[protopt-12 : protopt+1]
+//line proto.y:864
+		{
+			protoVAL.mtd = &methodNode{name: protoDollar[2].id, input: protoDollar[4].rpcType, output: protoDollar[8].rpcType, options: protoDollar[11].opts}
+			protoVAL.mtd.setRange(protoDollar[1].id, protoDollar[12].b)
+		}
+	case 174:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:869
+		{
+			protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[2].id, streamKeyword: protoDollar[1].id}
+			protoVAL.rpcType.setRange(protoDollar[1].id, protoDollar[2].id)
+		}
+	case 175:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:873
+		{
+			protoVAL.rpcType = &rpcTypeNode{msgType: protoDollar[1].id}
+			protoVAL.rpcType.setRange(protoDollar[1].id, protoDollar[1].id)
+		}
+	case 176:
+		protoDollar = protoS[protopt-2 : protopt+1]
+//line proto.y:878
+		{
+			protoVAL.opts = append(protoDollar[1].opts, protoDollar[2].opts...)
+		}
+	case 178:
+		protoDollar = protoS[protopt-0 : protopt+1]
+//line proto.y:882
+		{
+			protoVAL.opts = []*optionNode{}
+		}
+	case 179:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:886
+		{
+			protoVAL.opts = protoDollar[1].opts
+		}
+	case 180:
+		protoDollar = protoS[protopt-1 : protopt+1]
+//line proto.y:889
+		{
+			protoVAL.opts = []*optionNode{}
+		}
+	}
+	goto protostack /* stack new state and value */
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
new file mode 100644
index 0000000..d0a61c2
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/source_code_info.go
@@ -0,0 +1,612 @@
+package protoparse
+
+import (
+	"bytes"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc/internal"
+)
+
+func (r *parseResult) generateSourceCodeInfo() *dpb.SourceCodeInfo {
+	if r.nodes == nil {
+		// skip files that do not have AST info (these will be files
+		// that came from well-known descriptors, instead of from source)
+		return nil
+	}
+
+	sci := sourceCodeInfo{commentsUsed: map[*comment]struct{}{}}
+	path := make([]int32, 0, 10)
+
+	fn := r.getFileNode(r.fd).(*fileNode)
+	if fn.syntax != nil {
+		sci.newLoc(fn.syntax, append(path, internal.File_syntaxTag))
+	}
+	if fn.pkg != nil {
+		sci.newLoc(fn.pkg, append(path, internal.File_packageTag))
+	}
+	for i, imp := range fn.imports {
+		sci.newLoc(imp, append(path, internal.File_dependencyTag, int32(i)))
+	}
+
+	// file options
+	r.generateSourceCodeInfoForOptions(&sci, fn.decls, func(n interface{}) *optionNode {
+		return n.(*fileElement).option
+	}, r.fd.Options.GetUninterpretedOption(), append(path, internal.File_optionsTag))
+
+	// message types
+	for i, msg := range r.fd.GetMessageType() {
+		r.generateSourceCodeInfoForMessage(&sci, msg, append(path, internal.File_messagesTag, int32(i)))
+	}
+
+	// enum types
+	for i, enum := range r.fd.GetEnumType() {
+		r.generateSourceCodeInfoForEnum(&sci, enum, append(path, internal.File_enumsTag, int32(i)))
+	}
+
+	// extension fields
+	for i, ext := range r.fd.GetExtension() {
+		r.generateSourceCodeInfoForField(&sci, ext, append(path, internal.File_extensionsTag, int32(i)))
+	}
+
+	// services and methods
+	for i, svc := range r.fd.GetService() {
+		n := r.getServiceNode(svc).(*serviceNode)
+		svcPath := append(path, internal.File_servicesTag, int32(i))
+		sci.newLoc(n, svcPath)
+		sci.newLoc(n.name, append(svcPath, internal.Service_nameTag))
+
+		// service options
+		r.generateSourceCodeInfoForOptions(&sci, n.decls, func(n interface{}) *optionNode {
+			return n.(*serviceElement).option
+		}, svc.Options.GetUninterpretedOption(), append(svcPath, internal.Service_optionsTag))
+
+		// methods
+		for j, mtd := range svc.GetMethod() {
+			mn := r.getMethodNode(mtd).(*methodNode)
+			mtdPath := append(svcPath, internal.Service_methodsTag, int32(j))
+			sci.newLoc(mn, mtdPath)
+			sci.newLoc(mn.name, append(mtdPath, internal.Method_nameTag))
+
+			sci.newLoc(mn.input.msgType, append(mtdPath, internal.Method_inputTag))
+			if mn.input.streamKeyword != nil {
+				sci.newLoc(mn.input.streamKeyword, append(mtdPath, internal.Method_inputStreamTag))
+			}
+			sci.newLoc(mn.output.msgType, append(mtdPath, internal.Method_outputTag))
+			if mn.output.streamKeyword != nil {
+				sci.newLoc(mn.output.streamKeyword, append(mtdPath, internal.Method_outputStreamTag))
+			}
+
+			// method options
+			r.generateSourceCodeInfoForOptions(&sci, mn.options, func(n interface{}) *optionNode {
+				return n.(*optionNode)
+			}, mtd.Options.GetUninterpretedOption(), append(mtdPath, internal.Method_optionsTag))
+		}
+	}
+	return &dpb.SourceCodeInfo{Location: sci.generateLocs()}
+}
+
+func (r *parseResult) generateSourceCodeInfoForOptions(sci *sourceCodeInfo, elements interface{}, extractor func(interface{}) *optionNode, uninterp []*dpb.UninterpretedOption, path []int32) {
+	// Known options are option node elements that have a corresponding
+	// path in r.interpretedOptions. We'll do those first.
+	rv := reflect.ValueOf(elements)
+	for i := 0; i < rv.Len(); i++ {
+		on := extractor(rv.Index(i).Interface())
+		if on == nil {
+			continue
+		}
+		optPath := r.interpretedOptions[on]
+		if len(optPath) > 0 {
+			p := path
+			if optPath[0] == -1 {
+				// used by "default" and "json_name" field pseudo-options
+				// to attribute path to parent element (since those are
+				// stored directly on the descriptor, not its options)
+				p = make([]int32, len(path)-1)
+				copy(p, path)
+				optPath = optPath[1:]
+			}
+			sci.newLoc(on, append(p, optPath...))
+		}
+	}
+
+	// Now uninterpreted options
+	for i, uo := range uninterp {
+		optPath := append(path, internal.UninterpretedOptionsTag, int32(i))
+		on := r.getOptionNode(uo).(*optionNode)
+		sci.newLoc(on, optPath)
+
+		var valTag int32
+		switch {
+		case uo.IdentifierValue != nil:
+			valTag = internal.Uninterpreted_identTag
+		case uo.PositiveIntValue != nil:
+			valTag = internal.Uninterpreted_posIntTag
+		case uo.NegativeIntValue != nil:
+			valTag = internal.Uninterpreted_negIntTag
+		case uo.DoubleValue != nil:
+			valTag = internal.Uninterpreted_doubleTag
+		case uo.StringValue != nil:
+			valTag = internal.Uninterpreted_stringTag
+		case uo.AggregateValue != nil:
+			valTag = internal.Uninterpreted_aggregateTag
+		}
+		if valTag != 0 {
+			sci.newLoc(on.val, append(optPath, valTag))
+		}
+
+		for j, n := range uo.Name {
+			optNmPath := append(optPath, internal.Uninterpreted_nameTag, int32(j))
+			nn := r.getOptionNamePartNode(n).(*optionNamePartNode)
+			sci.newLoc(nn, optNmPath)
+			sci.newLoc(nn.text, append(optNmPath, internal.UninterpretedName_nameTag))
+		}
+	}
+}
+
+func (r *parseResult) generateSourceCodeInfoForMessage(sci *sourceCodeInfo, msg *dpb.DescriptorProto, path []int32) {
+	n := r.getMessageNode(msg)
+	sci.newLoc(n, path)
+
+	var decls []*messageElement
+	var resvdNames []*stringLiteralNode
+	switch n := n.(type) {
+	case *messageNode:
+		decls = n.decls
+		resvdNames = n.reserved
+	case *groupNode:
+		decls = n.decls
+		resvdNames = n.reserved
+	}
+	if decls == nil {
+		// map entry so nothing else to do
+		return
+	}
+
+	sci.newLoc(n.messageName(), append(path, internal.Message_nameTag))
+
+	// message options
+	r.generateSourceCodeInfoForOptions(sci, decls, func(n interface{}) *optionNode {
+		return n.(*messageElement).option
+	}, msg.Options.GetUninterpretedOption(), append(path, internal.Message_optionsTag))
+
+	// fields
+	for i, fld := range msg.GetField() {
+		r.generateSourceCodeInfoForField(sci, fld, append(path, internal.Message_fieldsTag, int32(i)))
+	}
+
+	// one-ofs
+	for i, ood := range msg.GetOneofDecl() {
+		oon := r.getOneOfNode(ood).(*oneOfNode)
+		ooPath := append(path, internal.Message_oneOfsTag, int32(i))
+		sci.newLoc(oon, ooPath)
+		sci.newLoc(oon.name, append(ooPath, internal.OneOf_nameTag))
+
+		// one-of options
+		r.generateSourceCodeInfoForOptions(sci, oon.decls, func(n interface{}) *optionNode {
+			return n.(*oneOfElement).option
+		}, ood.Options.GetUninterpretedOption(), append(ooPath, internal.OneOf_optionsTag))
+	}
+
+	// nested messages
+	for i, nm := range msg.GetNestedType() {
+		r.generateSourceCodeInfoForMessage(sci, nm, append(path, internal.Message_nestedMessagesTag, int32(i)))
+	}
+
+	// nested enums
+	for i, enum := range msg.GetEnumType() {
+		r.generateSourceCodeInfoForEnum(sci, enum, append(path, internal.Message_enumsTag, int32(i)))
+	}
+
+	// nested extensions
+	for i, ext := range msg.GetExtension() {
+		r.generateSourceCodeInfoForField(sci, ext, append(path, internal.Message_extensionsTag, int32(i)))
+	}
+
+	// extension ranges
+	for i, er := range msg.ExtensionRange {
+		rangePath := append(path, internal.Message_extensionRangeTag, int32(i))
+		rn := r.getExtensionRangeNode(er).(*rangeNode)
+		sci.newLoc(rn, rangePath)
+		sci.newLoc(rn.stNode, append(rangePath, internal.ExtensionRange_startTag))
+		if rn.stNode != rn.enNode {
+			sci.newLoc(rn.enNode, append(rangePath, internal.ExtensionRange_endTag))
+		}
+		// now we have to find the extension decl and options that correspond to this range :(
+		for _, d := range decls {
+			found := false
+			if d.extensionRange != nil {
+				for _, r := range d.extensionRange.ranges {
+					if rn == r {
+						found = true
+						break
+					}
+				}
+			}
+			if found {
+				r.generateSourceCodeInfoForOptions(sci, d.extensionRange.options, func(n interface{}) *optionNode {
+					return n.(*optionNode)
+				}, er.Options.GetUninterpretedOption(), append(rangePath, internal.ExtensionRange_optionsTag))
+				break
+			}
+		}
+	}
+
+	// reserved ranges
+	for i, rr := range msg.ReservedRange {
+		rangePath := append(path, internal.Message_reservedRangeTag, int32(i))
+		rn := r.getMessageReservedRangeNode(rr).(*rangeNode)
+		sci.newLoc(rn, rangePath)
+		sci.newLoc(rn.stNode, append(rangePath, internal.ReservedRange_startTag))
+		if rn.stNode != rn.enNode {
+			sci.newLoc(rn.enNode, append(rangePath, internal.ReservedRange_endTag))
+		}
+	}
+
+	// reserved names
+	for i, n := range resvdNames {
+		sci.newLoc(n, append(path, internal.Message_reservedNameTag, int32(i)))
+	}
+}
+
+func (r *parseResult) generateSourceCodeInfoForEnum(sci *sourceCodeInfo, enum *dpb.EnumDescriptorProto, path []int32) {
+	n := r.getEnumNode(enum).(*enumNode)
+	sci.newLoc(n, path)
+	sci.newLoc(n.name, append(path, internal.Enum_nameTag))
+
+	// enum options
+	r.generateSourceCodeInfoForOptions(sci, n.decls, func(n interface{}) *optionNode {
+		return n.(*enumElement).option
+	}, enum.Options.GetUninterpretedOption(), append(path, internal.Enum_optionsTag))
+
+	// enum values
+	for j, ev := range enum.GetValue() {
+		evn := r.getEnumValueNode(ev).(*enumValueNode)
+		evPath := append(path, internal.Enum_valuesTag, int32(j))
+		sci.newLoc(evn, evPath)
+		sci.newLoc(evn.name, append(evPath, internal.EnumVal_nameTag))
+		sci.newLoc(evn.getNumber(), append(evPath, internal.EnumVal_numberTag))
+
+		// enum value options
+		r.generateSourceCodeInfoForOptions(sci, evn.options, func(n interface{}) *optionNode {
+			return n.(*optionNode)
+		}, ev.Options.GetUninterpretedOption(), append(evPath, internal.EnumVal_optionsTag))
+	}
+
+	// reserved ranges
+	for i, rr := range enum.GetReservedRange() {
+		rangePath := append(path, internal.Enum_reservedRangeTag, int32(i))
+		rn := r.getEnumReservedRangeNode(rr).(*rangeNode)
+		sci.newLoc(rn, rangePath)
+		sci.newLoc(rn.stNode, append(rangePath, internal.ReservedRange_startTag))
+		if rn.stNode != rn.enNode {
+			sci.newLoc(rn.enNode, append(rangePath, internal.ReservedRange_endTag))
+		}
+	}
+
+	// reserved names
+	for i, rn := range n.reserved {
+		sci.newLoc(rn, append(path, internal.Enum_reservedNameTag, int32(i)))
+	}
+}
+
+func (r *parseResult) generateSourceCodeInfoForField(sci *sourceCodeInfo, fld *dpb.FieldDescriptorProto, path []int32) {
+	n := r.getFieldNode(fld)
+
+	isGroup := false
+	var opts []*optionNode
+	var extendee *extendNode
+	switch n := n.(type) {
+	case *fieldNode:
+		opts = n.options
+		extendee = n.extendee
+	case *mapFieldNode:
+		opts = n.options
+	case *groupNode:
+		isGroup = true
+		extendee = n.extendee
+	case *syntheticMapField:
+		// shouldn't get here since we don't recurse into fields from a mapNode
+		// in generateSourceCodeInfoForMessage... but just in case
+		return
+	}
+
+	sci.newLoc(n, path)
+	if !isGroup {
+		sci.newLoc(n.fieldName(), append(path, internal.Field_nameTag))
+		sci.newLoc(n.fieldType(), append(path, internal.Field_typeTag))
+	}
+	if n.fieldLabel() != nil {
+		sci.newLoc(n.fieldLabel(), append(path, internal.Field_labelTag))
+	}
+	sci.newLoc(n.fieldTag(), append(path, internal.Field_numberTag))
+	if extendee != nil {
+		sci.newLoc(extendee.extendee, append(path, internal.Field_extendeeTag))
+	}
+
+	r.generateSourceCodeInfoForOptions(sci, opts, func(n interface{}) *optionNode {
+		return n.(*optionNode)
+	}, fld.Options.GetUninterpretedOption(), append(path, internal.Field_optionsTag))
+}
+
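+// sourceCodeInfo accumulates the locations generated for a file and tracks
+// which comments have already been attributed to a location, so that each
+// comment is attached to at most one location.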
+type sourceCodeInfo struct {
+	locs         []*dpb.SourceCodeInfo_Location
+	commentsUsed map[*comment]struct{}
+}
+
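+// newLoc records a location for node n at the given path. It converts the
+// node's 1-based line/column positions into the zero-based span format used
+// by SourceCodeInfo and attaches any comments that have not already been
+// used by another location.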
+func (sci *sourceCodeInfo) newLoc(n node, path []int32) {
+	leadingComments := n.leadingComments()
+	trailingComments := n.trailingComments()
+	if sci.commentUsed(leadingComments) {
+		leadingComments = nil
+	}
+	if sci.commentUsed(trailingComments) {
+		trailingComments = nil
+	}
+	detached := groupComments(leadingComments)
+	trail := combineComments(trailingComments)
+	var lead *string
+	if len(leadingComments) > 0 && leadingComments[len(leadingComments)-1].end.Line >= n.start().Line-1 {
+		lead = proto.String(detached[len(detached)-1])
+		detached = detached[:len(detached)-1]
+	}
+	dup := make([]int32, len(path))
+	copy(dup, path)
+	var span []int32
+	if n.start().Line == n.end().Line {
+		span = []int32{int32(n.start().Line) - 1, int32(n.start().Col) - 1, int32(n.end().Col) - 1}
+	} else {
+		span = []int32{int32(n.start().Line) - 1, int32(n.start().Col) - 1, int32(n.end().Line) - 1, int32(n.end().Col) - 1}
+	}
+	sci.locs = append(sci.locs, &dpb.SourceCodeInfo_Location{
+		LeadingDetachedComments: detached,
+		LeadingComments:         lead,
+		TrailingComments:        trail,
+		Path:                    dup,
+		Span:                    span,
+	})
+}
+
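+// commentUsed reports whether the given comment block has already been
+// attributed to a location, marking it as used if it has not been.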
+func (sci *sourceCodeInfo) commentUsed(c []*comment) bool {
+	if len(c) == 0 {
+		return false
+	}
+	if _, ok := sci.commentsUsed[c[0]]; ok {
+		return true
+	}
+
+	sci.commentsUsed[c[0]] = struct{}{}
+	return false
+}
+
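+// groupComments splits a run of comments into groups: a new group starts at
+// each block comment, whenever the comment style changes, or when a blank
+// line separates consecutive comments.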
+func groupComments(comments []*comment) []string {
+	if len(comments) == 0 {
+		return nil
+	}
+
+	var groups []string
+	singleLineStyle := comments[0].text[:2] == "//"
+	line := comments[0].end.Line
+	start := 0
+	for i := 1; i < len(comments); i++ {
+		c := comments[i]
+		prevSingleLine := singleLineStyle
+		singleLineStyle = strings.HasPrefix(comments[i].text, "//")
+		if !singleLineStyle || prevSingleLine != singleLineStyle || c.start.Line > line+1 {
+			// new group!
+			groups = append(groups, *combineComments(comments[start:i]))
+			start = i
+		}
+		line = c.end.Line
+	}
+	// don't forget last group
+	groups = append(groups, *combineComments(comments[start:]))
+
+	return groups
+}
+
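+// combineComments joins the given comments into a single string, stripping
+// the leading "//" from line comments and the delimiters and leading '*'
+// continuation markers from block comments.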
+func combineComments(comments []*comment) *string {
+	if len(comments) == 0 {
+		return nil
+	}
+	first := true
+	var buf bytes.Buffer
+	for _, c := range comments {
+		if first {
+			first = false
+		} else {
+			buf.WriteByte('\n')
+		}
+		if c.text[:2] == "//" {
+			buf.WriteString(c.text[2:])
+		} else {
+			lines := strings.Split(c.text[2:len(c.text)-2], "\n")
+			first := true
+			for _, l := range lines {
+				if first {
+					first = false
+				} else {
+					buf.WriteByte('\n')
+				}
+
+				// strip a prefix of whitespace followed by '*'
+				j := 0
+				for j < len(l) {
+					if l[j] != ' ' && l[j] != '\t' {
+						break
+					}
+					j++
+				}
+				if j == len(l) {
+					l = ""
+				} else if l[j] == '*' {
+					l = l[j+1:]
+				} else if j > 0 {
+					l = " " + l[j:]
+				}
+
+				buf.WriteString(l)
+			}
+		}
+	}
+	return proto.String(buf.String())
+}
+
+func (sci *sourceCodeInfo) generateLocs() []*dpb.SourceCodeInfo_Location {
+	// Generate intermediate locations: paths between the root (inclusive) and
+	// the leaf locations already created. These will not have comments but
+	// will have an aggregate span that runs from min(start pos) to max(end pos)
+	// across all descendant paths.
+
+	if len(sci.locs) == 0 {
+		// nothing to generate
+		return nil
+	}
+
+	var root locTrie
+	for _, loc := range sci.locs {
+		root.add(loc.Path, loc)
+	}
+	root.fillIn()
+	locs := make([]*dpb.SourceCodeInfo_Location, 0, root.countLocs())
+	root.aggregate(&locs)
+	// finally, sort the resulting slice by location
+	sort.Slice(locs, func(i, j int) bool {
+		startI, endI := getSpanPositions(locs[i].Span)
+		startJ, endJ := getSpanPositions(locs[j].Span)
+		cmp := compareSlice(startI, startJ)
+		if cmp == 0 {
+			// if start position is the same, sort by end position _decreasing_
+			// (so enclosing locations will appear before leaves)
+			cmp = -compareSlice(endI, endJ)
+			if cmp == 0 {
+				// start and end position are the same? so break ties using path
+				cmp = compareSlice(locs[i].Path, locs[j].Path)
+			}
+		}
+		return cmp < 0
+	})
+	return locs
+}
+
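+// locTrie is a trie keyed by successive path elements. It is used to
+// synthesize locations for intermediate paths that do not have an explicit
+// location of their own.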
+type locTrie struct {
+	children map[int32]*locTrie
+	loc      *dpb.SourceCodeInfo_Location
+}
+
+func (t *locTrie) add(path []int32, loc *dpb.SourceCodeInfo_Location) {
+	if len(path) == 0 {
+		t.loc = loc
+		return
+	}
+	child := t.children[path[0]]
+	if child == nil {
+		if t.children == nil {
+			t.children = map[int32]*locTrie{}
+		}
+		child = &locTrie{}
+		t.children[path[0]] = child
+	}
+	child.add(path[1:], loc)
+}
+
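+// fillIn synthesizes a location for any node that lacks one, with a span
+// running from the smallest start position to the largest end position of
+// its children.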
+func (t *locTrie) fillIn() {
+	var path []int32
+	var start, end []int32
+	for _, child := range t.children {
+		// recurse
+		child.fillIn()
+		if t.loc == nil {
+			// maintain min(start) and max(end) so we can
+			// populate t.loc below
+			childStart, childEnd := getSpanPositions(child.loc.Span)
+
+			if start == nil {
+				if path == nil {
+					path = child.loc.Path[:len(child.loc.Path)-1]
+				}
+				start = childStart
+				end = childEnd
+			} else {
+				if compareSlice(childStart, start) < 0 {
+					start = childStart
+				}
+				if compareSlice(childEnd, end) > 0 {
+					end = childEnd
+				}
+			}
+		}
+	}
+
+	if t.loc == nil {
+		var span []int32
+		// we don't use append below because we want a new slice that doesn't
+		// share an underlying buffer with the spans of any other location
+		if start[0] == end[0] {
+			span = []int32{start[0], start[1], end[1]}
+		} else {
+			span = []int32{start[0], start[1], end[0], end[1]}
+		}
+		t.loc = &dpb.SourceCodeInfo_Location{
+			Path: path,
+			Span: span,
+		}
+	}
+}
+
+func (t *locTrie) countLocs() int {
+	count := 0
+	if t.loc != nil {
+		count = 1
+	}
+	for _, ch := range t.children {
+		count += ch.countLocs()
+	}
+	return count
+}
+
+func (t *locTrie) aggregate(dest *[]*dpb.SourceCodeInfo_Location) {
+	if t.loc != nil {
+		*dest = append(*dest, t.loc)
+	}
+	for _, child := range t.children {
+		child.aggregate(dest)
+	}
+}
+
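+// getSpanPositions returns the start and end positions of a span: a
+// three-element span is [line, startCol, endCol] (a single line) while a
+// four-element span is [startLine, startCol, endLine, endCol].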
+func getSpanPositions(span []int32) (start, end []int32) {
+	start = span[:2]
+	if len(span) == 3 {
+		end = []int32{span[0], span[2]}
+	} else {
+		end = span[2:]
+	}
+	return
+}
+
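+// compareSlice compares two int32 slices lexicographically, returning -1, 0,
+// or 1.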
+func compareSlice(a, b []int32) int {
+	end := len(a)
+	if len(b) < end {
+		end = len(b)
+	}
+	for i := 0; i < end; i++ {
+		if a[i] < b[i] {
+			return -1
+		}
+		if a[i] > b[i] {
+			return 1
+		}
+	}
+	if len(a) < len(b) {
+		return -1
+	}
+	if len(a) > len(b) {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
new file mode 100644
index 0000000..59bcdd3
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/std_imports.go
@@ -0,0 +1,49 @@
+package protoparse
+
+import (
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	// link in packages that include the standard protos included with protoc
+	_ "github.com/golang/protobuf/protoc-gen-go/plugin"
+	_ "github.com/golang/protobuf/ptypes/any"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "github.com/golang/protobuf/ptypes/empty"
+	_ "github.com/golang/protobuf/ptypes/struct"
+	_ "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/golang/protobuf/ptypes/wrappers"
+	_ "google.golang.org/genproto/protobuf/api"
+	_ "google.golang.org/genproto/protobuf/field_mask"
+	_ "google.golang.org/genproto/protobuf/ptype"
+	_ "google.golang.org/genproto/protobuf/source_context"
+
+	"github.com/jhump/protoreflect/internal"
+)
+
+// All files that are included with protoc are also included with this package
+// so that clients do not need to explicitly supply a copy of these protos (just
+// like callers of protoc do not need to supply them).
+var standardImports map[string]*dpb.FileDescriptorProto
+
+func init() {
+	standardFilenames := []string{
+		"google/protobuf/any.proto",
+		"google/protobuf/api.proto",
+		"google/protobuf/compiler/plugin.proto",
+		"google/protobuf/descriptor.proto",
+		"google/protobuf/duration.proto",
+		"google/protobuf/empty.proto",
+		"google/protobuf/field_mask.proto",
+		"google/protobuf/source_context.proto",
+		"google/protobuf/struct.proto",
+		"google/protobuf/timestamp.proto",
+		"google/protobuf/type.proto",
+		"google/protobuf/wrappers.proto",
+	}
+
+	standardImports = map[string]*dpb.FileDescriptorProto{}
+	for _, fn := range standardFilenames {
+		fd, err := internal.LoadFileDescriptor(fn)
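+		// descriptors that cannot be loaded are simply omitted from the map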
+		if err == nil {
+			standardImports[fn] = fd
+		}
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
new file mode 100644
index 0000000..c03fd64
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoparse/test-source-info.txt
@@ -0,0 +1,1696 @@
+---- desc_test_comments.proto ----
+
+
+:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:119:2
+
+
+ > syntax:
+desc_test_comments.proto:8:1
+desc_test_comments.proto:8:19
+    Leading detached comment [0]:
+ This is the first detached comment for the syntax.
+    Leading detached comment [1]:
+
+ This is a second detached comment.
+
+    Leading detached comment [2]:
+ This is a third.
+    Leading comments:
+ Syntax comment...
+    Trailing comments:
+ Syntax trailer.
+
+
+ > package:
+desc_test_comments.proto:12:1
+desc_test_comments.proto:12:17
+    Leading comments:
+ And now the package declaration
+
+
+ > options:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+
+
+ > options > go_package:
+desc_test_comments.proto:15:1
+desc_test_comments.proto:15:75
+    Leading comments:
+ option comments FTW!!!
+
+
+ > dependency:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:18:34
+
+
+ > dependency[0]:
+desc_test_comments.proto:17:1
+desc_test_comments.proto:17:38
+
+
+ > dependency[1]:
+desc_test_comments.proto:18:1
+desc_test_comments.proto:18:34
+
+
+ > message_type:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:89:2
+
+
+ > message_type[0]:
+desc_test_comments.proto:25:1
+desc_test_comments.proto:89:2
+    Leading detached comment [0]:
+ Multiple white space lines (like above) cannot
+ be preserved...
+    Leading comments:
+ We need a request for our RPC service below.
+    Trailing comments:
+ And next we'll need some extensions...
+
+
+ > message_type[0] > name:
+desc_test_comments.proto:25:68
+desc_test_comments.proto:25:75
+    Leading detached comment [0]:
+ detached message name 
+    Leading comments:
+ request with a capital R 
+    Trailing comments:
+ trailer
+
+
+ > message_type[0] > options:
+desc_test_comments.proto:26:3
+desc_test_comments.proto:35:54
+
+
+ > message_type[0] > options > deprecated:
+desc_test_comments.proto:26:3
+desc_test_comments.proto:26:28
+
+
+ > message_type[0] > field:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > field[0]:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:32:92
+    Leading comments:
+ A field comment
+    Trailing comments:
+ field trailer #1...
+
+
+ > message_type[0] > field[0] > label:
+desc_test_comments.proto:29:2
+desc_test_comments.proto:29:10
+
+
+ > message_type[0] > field[0] > type:
+desc_test_comments.proto:29:11
+desc_test_comments.proto:29:16
+
+
+ > message_type[0] > field[0] > name:
+desc_test_comments.proto:29:17
+desc_test_comments.proto:29:20
+
+
+ > message_type[0] > field[0] > number:
+desc_test_comments.proto:29:63
+desc_test_comments.proto:29:64
+    Leading detached comment [0]:
+ detached tag 
+    Leading comments:
+ tag numero uno 
+    Trailing comments:
+ tag trailer
+ that spans multiple lines...
+ more than two. 
+
+
+ > message_type[0] > field[0] > options:
+desc_test_comments.proto:32:5
+desc_test_comments.proto:32:90
+
+
+ > message_type[0] > field[0] > options > packed:
+desc_test_comments.proto:32:5
+desc_test_comments.proto:32:16
+
+
+ > message_type[0] > field[0] > json_name:
+desc_test_comments.proto:32:18
+desc_test_comments.proto:32:35
+
+
+ > message_type[0] > field[0] > options > ffubar:
+desc_test_comments.proto:32:37
+desc_test_comments.proto:32:62
+
+
+ > message_type[0] > field[0] > options > ffubar[0]:
+desc_test_comments.proto:32:37
+desc_test_comments.proto:32:62
+
+
+ > message_type[0] > field[0] > options > ffubarb:
+desc_test_comments.proto:32:64
+desc_test_comments.proto:32:90
+
+
+ > message_type[0] > options > mfubar:
+desc_test_comments.proto:35:20
+desc_test_comments.proto:35:54
+    Leading comments:
+ lead mfubar 
+    Trailing comments:
+ trailing mfubar
+
+
+ > message_type[0] > field[1]:
+desc_test_comments.proto:42:22
+desc_test_comments.proto:43:63
+    Leading detached comment [0]:
+ some detached comments
+    Leading detached comment [1]:
+ some detached comments
+    Leading detached comment [2]:
+ Another field comment
+    Leading comments:
+ label comment 
+
+
+ > message_type[0] > field[1] > label:
+desc_test_comments.proto:42:22
+desc_test_comments.proto:42:30
+
+
+ > message_type[0] > field[1] > type:
+desc_test_comments.proto:42:50
+desc_test_comments.proto:42:56
+    Leading comments:
+ type comment 
+
+
+ > message_type[0] > field[1] > name:
+desc_test_comments.proto:42:76
+desc_test_comments.proto:42:80
+    Leading comments:
+ name comment 
+
+
+ > message_type[0] > field[1] > number:
+desc_test_comments.proto:42:83
+desc_test_comments.proto:42:84
+
+
+ > message_type[0] > field[1] > default_value:
+desc_test_comments.proto:43:23
+desc_test_comments.proto:43:40
+    Leading comments:
+ default lead 
+    Trailing comments:
+ default trail 
+
+
+ > message_type[0] > extension_range:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[0]:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[0] > start:
+desc_test_comments.proto:46:13
+desc_test_comments.proto:46:16
+
+
+ > message_type[0] > extension_range[0] > end:
+desc_test_comments.proto:46:20
+desc_test_comments.proto:46:23
+
+
+ > message_type[0] > extension_range[1]:
+desc_test_comments.proto:47:13
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > start:
+desc_test_comments.proto:47:13
+desc_test_comments.proto:47:16
+
+
+ > message_type[0] > extension_range[1] > end:
+desc_test_comments.proto:47:20
+desc_test_comments.proto:47:23
+
+
+ > message_type[0] > extension_range[1] > options:
+desc_test_comments.proto:47:25
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > extension_range[1] > options > exfubarb:
+desc_test_comments.proto:47:25
+desc_test_comments.proto:47:67
+
+
+ > message_type[0] > extension_range[1] > options > exfubar:
+desc_test_comments.proto:47:69
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > extension_range[1] > options > exfubar[0]:
+desc_test_comments.proto:47:69
+desc_test_comments.proto:47:100
+
+
+ > message_type[0] > reserved_range:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_range[0]:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:58
+
+
+ > message_type[0] > reserved_range[0] > start:
+desc_test_comments.proto:51:50
+desc_test_comments.proto:51:52
+
+
+ > message_type[0] > reserved_range[0] > end:
+desc_test_comments.proto:51:56
+desc_test_comments.proto:51:58
+
+
+ > message_type[0] > reserved_range[1]:
+desc_test_comments.proto:51:60
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_range[1] > start:
+desc_test_comments.proto:51:60
+desc_test_comments.proto:51:62
+
+
+ > message_type[0] > reserved_range[1] > end:
+desc_test_comments.proto:51:66
+desc_test_comments.proto:51:68
+
+
+ > message_type[0] > reserved_name:
+desc_test_comments.proto:52:11
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > reserved_name[0]:
+desc_test_comments.proto:52:11
+desc_test_comments.proto:52:16
+
+
+ > message_type[0] > reserved_name[1]:
+desc_test_comments.proto:52:18
+desc_test_comments.proto:52:23
+
+
+ > message_type[0] > reserved_name[2]:
+desc_test_comments.proto:52:25
+desc_test_comments.proto:52:30
+
+
+ > message_type[0] > field[2]:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+    Leading comments:
+ Group comment
+
+
+ > message_type[0] > nested_type:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > nested_type[0]:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:66:3
+
+
+ > message_type[0] > field[2] > label:
+desc_test_comments.proto:55:2
+desc_test_comments.proto:55:10
+
+
+ > message_type[0] > nested_type[0] > name:
+desc_test_comments.proto:55:34
+desc_test_comments.proto:55:40
+    Leading comments:
+ group name 
+
+
+ > message_type[0] > field[2] > number:
+desc_test_comments.proto:55:43
+desc_test_comments.proto:55:44
+
+
+ > message_type[0] > nested_type[0] > options:
+desc_test_comments.proto:56:3
+desc_test_comments.proto:61:50
+
+
+ > message_type[0] > nested_type[0] > options > mfubar:
+desc_test_comments.proto:56:3
+desc_test_comments.proto:56:38
+
+
+ > message_type[0] > nested_type[0] > field:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:64:27
+
+
+ > message_type[0] > nested_type[0] > field[0]:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:58:27
+
+
+ > message_type[0] > nested_type[0] > field[0] > label:
+desc_test_comments.proto:58:3
+desc_test_comments.proto:58:11
+
+
+ > message_type[0] > nested_type[0] > field[0] > type:
+desc_test_comments.proto:58:12
+desc_test_comments.proto:58:18
+
+
+ > message_type[0] > nested_type[0] > field[0] > name:
+desc_test_comments.proto:58:19
+desc_test_comments.proto:58:22
+
+
+ > message_type[0] > nested_type[0] > field[0] > number:
+desc_test_comments.proto:58:25
+desc_test_comments.proto:58:26
+
+
+ > message_type[0] > nested_type[0] > field[1]:
+desc_test_comments.proto:59:3
+desc_test_comments.proto:59:26
+
+
+ > message_type[0] > nested_type[0] > field[1] > label:
+desc_test_comments.proto:59:3
+desc_test_comments.proto:59:11
+
+
+ > message_type[0] > nested_type[0] > field[1] > type:
+desc_test_comments.proto:59:12
+desc_test_comments.proto:59:17
+
+
+ > message_type[0] > nested_type[0] > field[1] > name:
+desc_test_comments.proto:59:18
+desc_test_comments.proto:59:21
+
+
+ > message_type[0] > nested_type[0] > field[1] > number:
+desc_test_comments.proto:59:24
+desc_test_comments.proto:59:25
+
+
+ > message_type[0] > nested_type[0] > options > no_standard_descriptor_accessor:
+desc_test_comments.proto:61:3
+desc_test_comments.proto:61:50
+
+
+ > message_type[0] > nested_type[0] > field[2]:
+desc_test_comments.proto:64:3
+desc_test_comments.proto:64:27
+    Leading comments:
+ Leading comment...
+    Trailing comments:
+ Trailing comment...
+
+
+ > message_type[0] > nested_type[0] > field[2] > label:
+desc_test_comments.proto:64:3
+desc_test_comments.proto:64:11
+
+
+ > message_type[0] > nested_type[0] > field[2] > type:
+desc_test_comments.proto:64:12
+desc_test_comments.proto:64:18
+
+
+ > message_type[0] > nested_type[0] > field[2] > name:
+desc_test_comments.proto:64:19
+desc_test_comments.proto:64:22
+
+
+ > message_type[0] > nested_type[0] > field[2] > number:
+desc_test_comments.proto:64:25
+desc_test_comments.proto:64:26
+
+
+ > message_type[0] > enum_type:
+desc_test_comments.proto:68:2
+desc_test_comments.proto:88:3
+
+
+ > message_type[0] > enum_type[0]:
+desc_test_comments.proto:68:2
+desc_test_comments.proto:88:3
+
+
+ > message_type[0] > enum_type[0] > name:
+desc_test_comments.proto:68:7
+desc_test_comments.proto:68:22
+    Trailing comments:
+ "super"!
+
+
+ > message_type[0] > enum_type[0] > value:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:85:17
+
+
+ > message_type[0] > enum_type[0] > value[0]:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:72:72
+
+
+ > message_type[0] > enum_type[0] > value[0] > name:
+desc_test_comments.proto:72:3
+desc_test_comments.proto:72:8
+
+
+ > message_type[0] > enum_type[0] > value[0] > number:
+desc_test_comments.proto:72:11
+desc_test_comments.proto:72:12
+
+
+ > message_type[0] > enum_type[0] > value[0] > options:
+desc_test_comments.proto:72:14
+desc_test_comments.proto:72:70
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubars:
+desc_test_comments.proto:72:14
+desc_test_comments.proto:72:42
+
+
+ > message_type[0] > enum_type[0] > value[0] > options > evfubar:
+desc_test_comments.proto:72:44
+desc_test_comments.proto:72:70
+
+
+ > message_type[0] > enum_type[0] > value[1]:
+desc_test_comments.proto:73:3
+desc_test_comments.proto:73:86
+
+
+ > message_type[0] > enum_type[0] > value[1] > name:
+desc_test_comments.proto:73:3
+desc_test_comments.proto:73:8
+
+
+ > message_type[0] > enum_type[0] > value[1] > number:
+desc_test_comments.proto:73:11
+desc_test_comments.proto:73:12
+
+
+ > message_type[0] > enum_type[0] > value[1] > options:
+desc_test_comments.proto:73:15
+desc_test_comments.proto:73:84
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaruf:
+desc_test_comments.proto:73:15
+desc_test_comments.proto:73:43
+
+
+ > message_type[0] > enum_type[0] > value[1] > options > evfubaru:
+desc_test_comments.proto:73:59
+desc_test_comments.proto:73:84
+
+
+ > message_type[0] > enum_type[0] > value[2]:
+desc_test_comments.proto:74:3
+desc_test_comments.proto:74:13
+
+
+ > message_type[0] > enum_type[0] > value[2] > name:
+desc_test_comments.proto:74:3
+desc_test_comments.proto:74:8
+
+
+ > message_type[0] > enum_type[0] > value[2] > number:
+desc_test_comments.proto:74:11
+desc_test_comments.proto:74:12
+
+
+ > message_type[0] > enum_type[0] > value[3]:
+desc_test_comments.proto:75:3
+desc_test_comments.proto:75:14
+
+
+ > message_type[0] > enum_type[0] > value[3] > name:
+desc_test_comments.proto:75:3
+desc_test_comments.proto:75:9
+
+
+ > message_type[0] > enum_type[0] > value[3] > number:
+desc_test_comments.proto:75:12
+desc_test_comments.proto:75:13
+
+
+ > message_type[0] > enum_type[0] > options:
+desc_test_comments.proto:77:3
+desc_test_comments.proto:87:36
+
+
+ > message_type[0] > enum_type[0] > options > efubars:
+desc_test_comments.proto:77:3
+desc_test_comments.proto:77:38
+
+
+ > message_type[0] > enum_type[0] > value[4]:
+desc_test_comments.proto:79:3
+desc_test_comments.proto:79:13
+
+
+ > message_type[0] > enum_type[0] > value[4] > name:
+desc_test_comments.proto:79:3
+desc_test_comments.proto:79:8
+
+
+ > message_type[0] > enum_type[0] > value[4] > number:
+desc_test_comments.proto:79:11
+desc_test_comments.proto:79:12
+
+
+ > message_type[0] > enum_type[0] > value[5]:
+desc_test_comments.proto:80:3
+desc_test_comments.proto:80:15
+
+
+ > message_type[0] > enum_type[0] > value[5] > name:
+desc_test_comments.proto:80:3
+desc_test_comments.proto:80:10
+
+
+ > message_type[0] > enum_type[0] > value[5] > number:
+desc_test_comments.proto:80:13
+desc_test_comments.proto:80:14
+
+
+ > message_type[0] > enum_type[0] > value[6]:
+desc_test_comments.proto:81:3
+desc_test_comments.proto:81:46
+
+
+ > message_type[0] > enum_type[0] > value[6] > name:
+desc_test_comments.proto:81:3
+desc_test_comments.proto:81:10
+
+
+ > message_type[0] > enum_type[0] > value[6] > number:
+desc_test_comments.proto:81:13
+desc_test_comments.proto:81:14
+
+
+ > message_type[0] > enum_type[0] > value[6] > options:
+desc_test_comments.proto:81:16
+desc_test_comments.proto:81:44
+
+
+ > message_type[0] > enum_type[0] > value[6] > options > evfubarsf:
+desc_test_comments.proto:81:16
+desc_test_comments.proto:81:44
+
+
+ > message_type[0] > enum_type[0] > value[7]:
+desc_test_comments.proto:82:3
+desc_test_comments.proto:82:14
+
+
+ > message_type[0] > enum_type[0] > value[7] > name:
+desc_test_comments.proto:82:3
+desc_test_comments.proto:82:9
+
+
+ > message_type[0] > enum_type[0] > value[7] > number:
+desc_test_comments.proto:82:12
+desc_test_comments.proto:82:13
+
+
+ > message_type[0] > enum_type[0] > value[8]:
+desc_test_comments.proto:83:3
+desc_test_comments.proto:83:17
+
+
+ > message_type[0] > enum_type[0] > value[8] > name:
+desc_test_comments.proto:83:3
+desc_test_comments.proto:83:12
+
+
+ > message_type[0] > enum_type[0] > value[8] > number:
+desc_test_comments.proto:83:15
+desc_test_comments.proto:83:16
+
+
+ > message_type[0] > enum_type[0] > value[9]:
+desc_test_comments.proto:84:3
+desc_test_comments.proto:84:13
+
+
+ > message_type[0] > enum_type[0] > value[9] > name:
+desc_test_comments.proto:84:3
+desc_test_comments.proto:84:8
+
+
+ > message_type[0] > enum_type[0] > value[9] > number:
+desc_test_comments.proto:84:11
+desc_test_comments.proto:84:12
+
+
+ > message_type[0] > enum_type[0] > value[10]:
+desc_test_comments.proto:85:3
+desc_test_comments.proto:85:17
+
+
+ > message_type[0] > enum_type[0] > value[10] > name:
+desc_test_comments.proto:85:3
+desc_test_comments.proto:85:9
+
+
+ > message_type[0] > enum_type[0] > value[10] > number:
+desc_test_comments.proto:85:12
+desc_test_comments.proto:85:16
+
+
+ > message_type[0] > enum_type[0] > options > efubar:
+desc_test_comments.proto:87:3
+desc_test_comments.proto:87:36
+
+
+ > extension[0] > extendee:
+desc_test_comments.proto:94:1
+desc_test_comments.proto:94:8
+    Leading comments:
+ extendee comment
+
+
+ > extension[1] > extendee:
+desc_test_comments.proto:94:1
+desc_test_comments.proto:94:8
+
+
+ > extension:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:98:30
+
+
+ > extension[0]:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:96:30
+    Leading comments:
+ comment for guid1
+
+
+ > extension[0] > label:
+desc_test_comments.proto:96:2
+desc_test_comments.proto:96:10
+
+
+ > extension[0] > type:
+desc_test_comments.proto:96:11
+desc_test_comments.proto:96:17
+
+
+ > extension[0] > name:
+desc_test_comments.proto:96:18
+desc_test_comments.proto:96:23
+
+
+ > extension[0] > number:
+desc_test_comments.proto:96:26
+desc_test_comments.proto:96:29
+
+
+ > extension[1]:
+desc_test_comments.proto:98:2
+desc_test_comments.proto:98:30
+    Leading comments:
+ ... and a comment for guid2
+
+
+ > extension[1] > label:
+desc_test_comments.proto:98:2
+desc_test_comments.proto:98:10
+
+
+ > extension[1] > type:
+desc_test_comments.proto:98:11
+desc_test_comments.proto:98:17
+
+
+ > extension[1] > name:
+desc_test_comments.proto:98:18
+desc_test_comments.proto:98:23
+
+
+ > extension[1] > number:
+desc_test_comments.proto:98:26
+desc_test_comments.proto:98:29
+
+
+ > service:
+desc_test_comments.proto:103:1
+desc_test_comments.proto:119:2
+
+
+ > service[0]:
+desc_test_comments.proto:103:1
+desc_test_comments.proto:119:2
+    Leading comments:
+ Service comment
+    Trailing comments:
+ service trailer
+
+
+ > service[0] > name:
+desc_test_comments.proto:103:28
+desc_test_comments.proto:103:38
+    Leading comments:
+ service name 
+
+
+ > service[0] > options:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:108:38
+
+
+ > service[0] > options > sfubar:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:105:40
+
+
+ > service[0] > options > sfubar > id:
+desc_test_comments.proto:104:2
+desc_test_comments.proto:104:36
+
+
+ > service[0] > options > sfubar > name:
+desc_test_comments.proto:105:2
+desc_test_comments.proto:105:40
+
+
+ > service[0] > options > deprecated:
+desc_test_comments.proto:106:2
+desc_test_comments.proto:106:28
+
+
+ > service[0] > options > sfubare:
+desc_test_comments.proto:108:2
+desc_test_comments.proto:108:38
+
+
+ > service[0] > method:
+desc_test_comments.proto:111:2
+desc_test_comments.proto:118:3
+
+
+ > service[0] > method[0]:
+desc_test_comments.proto:111:2
+desc_test_comments.proto:112:70
+    Leading comments:
+ Method comment
+
+
+ > service[0] > method[0] > name:
+desc_test_comments.proto:111:21
+desc_test_comments.proto:111:33
+    Leading comments:
+ rpc name 
+    Trailing comments:
+ comment A 
+
+
+ > service[0] > method[0] > client_streaming:
+desc_test_comments.proto:111:66
+desc_test_comments.proto:111:72
+    Leading comments:
+ comment B 
+
+
+ > service[0] > method[0] > input_type:
+desc_test_comments.proto:111:89
+desc_test_comments.proto:111:96
+    Leading comments:
+ comment C 
+
+
+ > service[0] > method[0] > output_type:
+desc_test_comments.proto:112:43
+desc_test_comments.proto:112:50
+    Leading comments:
+comment E 
+
+
+ > service[0] > method[1]:
+desc_test_comments.proto:114:2
+desc_test_comments.proto:118:3
+
+
+ > service[0] > method[1] > name:
+desc_test_comments.proto:114:6
+desc_test_comments.proto:114:14
+
+
+ > service[0] > method[1] > input_type:
+desc_test_comments.proto:114:16
+desc_test_comments.proto:114:23
+
+
+ > service[0] > method[1] > output_type:
+desc_test_comments.proto:114:34
+desc_test_comments.proto:114:55
+
+
+ > service[0] > method[1] > options:
+desc_test_comments.proto:115:3
+desc_test_comments.proto:117:42
+
+
+ > service[0] > method[1] > options > deprecated:
+desc_test_comments.proto:115:3
+desc_test_comments.proto:115:28
+
+
+ > service[0] > method[1] > options > mtfubar:
+desc_test_comments.proto:116:3
+desc_test_comments.proto:116:39
+
+
+ > service[0] > method[1] > options > mtfubar[0]:
+desc_test_comments.proto:116:3
+desc_test_comments.proto:116:39
+
+
+ > service[0] > method[1] > options > mtfubard:
+desc_test_comments.proto:117:3
+desc_test_comments.proto:117:42
+---- desc_test_options.proto ----
+
+
+:
+desc_test_options.proto:1:1
+desc_test_options.proto:62:34
+
+
+ > syntax:
+desc_test_options.proto:1:1
+desc_test_options.proto:1:19
+
+
+ > options:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > options > go_package:
+desc_test_options.proto:3:1
+desc_test_options.proto:3:73
+
+
+ > package:
+desc_test_options.proto:5:1
+desc_test_options.proto:5:20
+
+
+ > dependency:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > dependency[0]:
+desc_test_options.proto:7:1
+desc_test_options.proto:7:43
+
+
+ > extension[0] > extendee:
+desc_test_options.proto:9:8
+desc_test_options.proto:9:38
+
+
+ > extension:
+desc_test_options.proto:10:2
+desc_test_options.proto:62:34
+
+
+ > extension[0]:
+desc_test_options.proto:10:2
+desc_test_options.proto:10:31
+
+
+ > extension[0] > label:
+desc_test_options.proto:10:2
+desc_test_options.proto:10:10
+
+
+ > extension[0] > type:
+desc_test_options.proto:10:11
+desc_test_options.proto:10:15
+
+
+ > extension[0] > name:
+desc_test_options.proto:10:16
+desc_test_options.proto:10:22
+
+
+ > extension[0] > number:
+desc_test_options.proto:10:25
+desc_test_options.proto:10:30
+
+
+ > extension[1] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[2] > extendee:
+desc_test_options.proto:13:8
+desc_test_options.proto:13:36
+
+
+ > extension[1]:
+desc_test_options.proto:14:2
+desc_test_options.proto:14:33
+
+
+ > extension[1] > label:
+desc_test_options.proto:14:2
+desc_test_options.proto:14:10
+
+
+ > extension[1] > type:
+desc_test_options.proto:14:11
+desc_test_options.proto:14:17
+
+
+ > extension[1] > name:
+desc_test_options.proto:14:18
+desc_test_options.proto:14:24
+
+
+ > extension[1] > number:
+desc_test_options.proto:14:27
+desc_test_options.proto:14:32
+
+
+ > extension[2]:
+desc_test_options.proto:15:2
+desc_test_options.proto:15:33
+
+
+ > extension[2] > label:
+desc_test_options.proto:15:2
+desc_test_options.proto:15:10
+
+
+ > extension[2] > type:
+desc_test_options.proto:15:11
+desc_test_options.proto:15:16
+
+
+ > extension[2] > name:
+desc_test_options.proto:15:17
+desc_test_options.proto:15:24
+
+
+ > extension[2] > number:
+desc_test_options.proto:15:27
+desc_test_options.proto:15:32
+
+
+ > extension[3] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[4] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[5] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[6] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[7] > extendee:
+desc_test_options.proto:18:8
+desc_test_options.proto:18:35
+
+
+ > extension[3]:
+desc_test_options.proto:19:2
+desc_test_options.proto:19:32
+
+
+ > extension[3] > label:
+desc_test_options.proto:19:2
+desc_test_options.proto:19:10
+
+
+ > extension[3] > type:
+desc_test_options.proto:19:11
+desc_test_options.proto:19:16
+
+
+ > extension[3] > name:
+desc_test_options.proto:19:17
+desc_test_options.proto:19:23
+
+
+ > extension[3] > number:
+desc_test_options.proto:19:26
+desc_test_options.proto:19:31
+
+
+ > extension[4]:
+desc_test_options.proto:20:2
+desc_test_options.proto:20:34
+
+
+ > extension[4] > label:
+desc_test_options.proto:20:2
+desc_test_options.proto:20:10
+
+
+ > extension[4] > type:
+desc_test_options.proto:20:11
+desc_test_options.proto:20:17
+
+
+ > extension[4] > name:
+desc_test_options.proto:20:18
+desc_test_options.proto:20:25
+
+
+ > extension[4] > number:
+desc_test_options.proto:20:28
+desc_test_options.proto:20:33
+
+
+ > extension[5]:
+desc_test_options.proto:21:2
+desc_test_options.proto:21:37
+
+
+ > extension[5] > label:
+desc_test_options.proto:21:2
+desc_test_options.proto:21:10
+
+
+ > extension[5] > type:
+desc_test_options.proto:21:11
+desc_test_options.proto:21:19
+
+
+ > extension[5] > name:
+desc_test_options.proto:21:20
+desc_test_options.proto:21:28
+
+
+ > extension[5] > number:
+desc_test_options.proto:21:31
+desc_test_options.proto:21:36
+
+
+ > extension[6]:
+desc_test_options.proto:22:2
+desc_test_options.proto:22:34
+
+
+ > extension[6] > label:
+desc_test_options.proto:22:2
+desc_test_options.proto:22:10
+
+
+ > extension[6] > type:
+desc_test_options.proto:22:11
+desc_test_options.proto:22:17
+
+
+ > extension[6] > name:
+desc_test_options.proto:22:18
+desc_test_options.proto:22:25
+
+
+ > extension[6] > number:
+desc_test_options.proto:22:28
+desc_test_options.proto:22:33
+
+
+ > extension[7]:
+desc_test_options.proto:23:2
+desc_test_options.proto:23:36
+
+
+ > extension[7] > label:
+desc_test_options.proto:23:2
+desc_test_options.proto:23:10
+
+
+ > extension[7] > type:
+desc_test_options.proto:23:11
+desc_test_options.proto:23:18
+
+
+ > extension[7] > name:
+desc_test_options.proto:23:19
+desc_test_options.proto:23:27
+
+
+ > extension[7] > number:
+desc_test_options.proto:23:30
+desc_test_options.proto:23:35
+
+
+ > extension[8] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[9] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[10] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[11] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[12] > extendee:
+desc_test_options.proto:26:8
+desc_test_options.proto:26:40
+
+
+ > extension[8]:
+desc_test_options.proto:27:2
+desc_test_options.proto:27:33
+
+
+ > extension[8] > label:
+desc_test_options.proto:27:2
+desc_test_options.proto:27:10
+
+
+ > extension[8] > type:
+desc_test_options.proto:27:11
+desc_test_options.proto:27:16
+
+
+ > extension[8] > name:
+desc_test_options.proto:27:17
+desc_test_options.proto:27:24
+
+
+ > extension[8] > number:
+desc_test_options.proto:27:27
+desc_test_options.proto:27:32
+
+
+ > extension[9]:
+desc_test_options.proto:28:2
+desc_test_options.proto:28:35
+
+
+ > extension[9] > label:
+desc_test_options.proto:28:2
+desc_test_options.proto:28:10
+
+
+ > extension[9] > type:
+desc_test_options.proto:28:11
+desc_test_options.proto:28:17
+
+
+ > extension[9] > name:
+desc_test_options.proto:28:18
+desc_test_options.proto:28:26
+
+
+ > extension[9] > number:
+desc_test_options.proto:28:29
+desc_test_options.proto:28:34
+
+
+ > extension[10]:
+desc_test_options.proto:29:2
+desc_test_options.proto:29:38
+
+
+ > extension[10] > label:
+desc_test_options.proto:29:2
+desc_test_options.proto:29:10
+
+
+ > extension[10] > type:
+desc_test_options.proto:29:11
+desc_test_options.proto:29:19
+
+
+ > extension[10] > name:
+desc_test_options.proto:29:20
+desc_test_options.proto:29:29
+
+
+ > extension[10] > number:
+desc_test_options.proto:29:32
+desc_test_options.proto:29:37
+
+
+ > extension[11]:
+desc_test_options.proto:30:2
+desc_test_options.proto:30:35
+
+
+ > extension[11] > label:
+desc_test_options.proto:30:2
+desc_test_options.proto:30:10
+
+
+ > extension[11] > type:
+desc_test_options.proto:30:11
+desc_test_options.proto:30:17
+
+
+ > extension[11] > name:
+desc_test_options.proto:30:18
+desc_test_options.proto:30:26
+
+
+ > extension[11] > number:
+desc_test_options.proto:30:29
+desc_test_options.proto:30:34
+
+
+ > extension[12]:
+desc_test_options.proto:31:2
+desc_test_options.proto:31:37
+
+
+ > extension[12] > label:
+desc_test_options.proto:31:2
+desc_test_options.proto:31:10
+
+
+ > extension[12] > type:
+desc_test_options.proto:31:11
+desc_test_options.proto:31:18
+
+
+ > extension[12] > name:
+desc_test_options.proto:31:19
+desc_test_options.proto:31:28
+
+
+ > extension[12] > number:
+desc_test_options.proto:31:31
+desc_test_options.proto:31:36
+
+
+ > extension[13] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[14] > extendee:
+desc_test_options.proto:34:8
+desc_test_options.proto:34:38
+
+
+ > extension[13]:
+desc_test_options.proto:35:2
+desc_test_options.proto:35:46
+
+
+ > extension[13] > label:
+desc_test_options.proto:35:2
+desc_test_options.proto:35:10
+
+
+ > extension[13] > type:
+desc_test_options.proto:35:11
+desc_test_options.proto:35:30
+
+
+ > extension[13] > name:
+desc_test_options.proto:35:31
+desc_test_options.proto:35:37
+
+
+ > extension[13] > number:
+desc_test_options.proto:35:40
+desc_test_options.proto:35:45
+
+
+ > extension[14]:
+desc_test_options.proto:36:2
+desc_test_options.proto:36:44
+
+
+ > extension[14] > label:
+desc_test_options.proto:36:2
+desc_test_options.proto:36:10
+
+
+ > extension[14] > type:
+desc_test_options.proto:36:11
+desc_test_options.proto:36:27
+
+
+ > extension[14] > name:
+desc_test_options.proto:36:28
+desc_test_options.proto:36:35
+
+
+ > extension[14] > number:
+desc_test_options.proto:36:38
+desc_test_options.proto:36:43
+
+
+ > extension[15] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[16] > extendee:
+desc_test_options.proto:39:8
+desc_test_options.proto:39:37
+
+
+ > extension[15]:
+desc_test_options.proto:40:2
+desc_test_options.proto:40:33
+
+
+ > extension[15] > label:
+desc_test_options.proto:40:2
+desc_test_options.proto:40:10
+
+
+ > extension[15] > type:
+desc_test_options.proto:40:11
+desc_test_options.proto:40:16
+
+
+ > extension[15] > name:
+desc_test_options.proto:40:17
+desc_test_options.proto:40:24
+
+
+ > extension[15] > number:
+desc_test_options.proto:40:27
+desc_test_options.proto:40:32
+
+
+ > extension[16]:
+desc_test_options.proto:41:2
+desc_test_options.proto:41:35
+
+
+ > extension[16] > label:
+desc_test_options.proto:41:2
+desc_test_options.proto:41:10
+
+
+ > extension[16] > type:
+desc_test_options.proto:41:11
+desc_test_options.proto:41:17
+
+
+ > extension[16] > name:
+desc_test_options.proto:41:18
+desc_test_options.proto:41:26
+
+
+ > extension[16] > number:
+desc_test_options.proto:41:29
+desc_test_options.proto:41:34
+
+
+ > message_type:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+
+
+ > message_type[0]:
+desc_test_options.proto:45:1
+desc_test_options.proto:48:2
+    Leading comments:
+ Test message used by custom options
+
+
+ > message_type[0] > name:
+desc_test_options.proto:45:9
+desc_test_options.proto:45:28
+
+
+ > message_type[0] > field:
+desc_test_options.proto:46:2
+desc_test_options.proto:47:27
+
+
+ > message_type[0] > field[0]:
+desc_test_options.proto:46:2
+desc_test_options.proto:46:25
+
+
+ > message_type[0] > field[0] > label:
+desc_test_options.proto:46:2
+desc_test_options.proto:46:10
+
+
+ > message_type[0] > field[0] > type:
+desc_test_options.proto:46:11
+desc_test_options.proto:46:17
+
+
+ > message_type[0] > field[0] > name:
+desc_test_options.proto:46:18
+desc_test_options.proto:46:20
+
+
+ > message_type[0] > field[0] > number:
+desc_test_options.proto:46:23
+desc_test_options.proto:46:24
+
+
+ > message_type[0] > field[1]:
+desc_test_options.proto:47:2
+desc_test_options.proto:47:27
+
+
+ > message_type[0] > field[1] > label:
+desc_test_options.proto:47:2
+desc_test_options.proto:47:10
+
+
+ > message_type[0] > field[1] > type:
+desc_test_options.proto:47:11
+desc_test_options.proto:47:17
+
+
+ > message_type[0] > field[1] > name:
+desc_test_options.proto:47:18
+desc_test_options.proto:47:22
+
+
+ > message_type[0] > field[1] > number:
+desc_test_options.proto:47:25
+desc_test_options.proto:47:26
+
+
+ > enum_type:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+
+
+ > enum_type[0]:
+desc_test_options.proto:51:1
+desc_test_options.proto:53:2
+    Leading comments:
+ Test enum used by custom options
+
+
+ > enum_type[0] > name:
+desc_test_options.proto:51:6
+desc_test_options.proto:51:22
+
+
+ > enum_type[0] > value:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:12
+
+
+ > enum_type[0] > value[0]:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:12
+
+
+ > enum_type[0] > value[0] > name:
+desc_test_options.proto:52:2
+desc_test_options.proto:52:7
+
+
+ > enum_type[0] > value[0] > number:
+desc_test_options.proto:52:10
+desc_test_options.proto:52:11
+
+
+ > extension[17] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[18] > extendee:
+desc_test_options.proto:55:8
+desc_test_options.proto:55:45
+
+
+ > extension[17]:
+desc_test_options.proto:56:2
+desc_test_options.proto:56:34
+
+
+ > extension[17] > label:
+desc_test_options.proto:56:2
+desc_test_options.proto:56:10
+
+
+ > extension[17] > type:
+desc_test_options.proto:56:11
+desc_test_options.proto:56:17
+
+
+ > extension[17] > name:
+desc_test_options.proto:56:18
+desc_test_options.proto:56:25
+
+
+ > extension[17] > number:
+desc_test_options.proto:56:28
+desc_test_options.proto:56:33
+
+
+ > extension[18]:
+desc_test_options.proto:57:2
+desc_test_options.proto:57:34
+
+
+ > extension[18] > label:
+desc_test_options.proto:57:2
+desc_test_options.proto:57:10
+
+
+ > extension[18] > type:
+desc_test_options.proto:57:11
+desc_test_options.proto:57:16
+
+
+ > extension[18] > name:
+desc_test_options.proto:57:17
+desc_test_options.proto:57:25
+
+
+ > extension[18] > number:
+desc_test_options.proto:57:28
+desc_test_options.proto:57:33
+
+
+ > extension[19] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[20] > extendee:
+desc_test_options.proto:60:8
+desc_test_options.proto:60:36
+
+
+ > extension[19]:
+desc_test_options.proto:61:2
+desc_test_options.proto:61:34
+
+
+ > extension[19] > label:
+desc_test_options.proto:61:2
+desc_test_options.proto:61:10
+
+
+ > extension[19] > type:
+desc_test_options.proto:61:11
+desc_test_options.proto:61:17
+
+
+ > extension[19] > name:
+desc_test_options.proto:61:18
+desc_test_options.proto:61:25
+
+
+ > extension[19] > number:
+desc_test_options.proto:61:28
+desc_test_options.proto:61:33
+
+
+ > extension[20]:
+desc_test_options.proto:62:2
+desc_test_options.proto:62:34
+
+
+ > extension[20] > label:
+desc_test_options.proto:62:2
+desc_test_options.proto:62:10
+
+
+ > extension[20] > type:
+desc_test_options.proto:62:11
+desc_test_options.proto:62:16
+
+
+ > extension[20] > name:
+desc_test_options.proto:62:17
+desc_test_options.proto:62:25
+
+
+ > extension[20] > number:
+desc_test_options.proto:62:28
+desc_test_options.proto:62:33
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
new file mode 100644
index 0000000..b56e8ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/doc.go
@@ -0,0 +1,7 @@
+// Package protoprint provides a mechanism to generate protobuf source code
+// from descriptors.
+//
+// This can be useful to turn file descriptor sets (produced by protoc) back
+// into proto IDL code. Combined with the protoreflect/builder package, it can
+// also be used to perform code generation of proto source code.
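+//
+// A minimal usage sketch (an editor's illustration, not part of the upstream
+// source), assuming fd is a *desc.FileDescriptor that has already been
+// loaded:
+//
+//    var p protoprint.Printer
+//    str, err := p.PrintProtoToString(fd)
+//    if err != nil {
+//        // handle error
+//    }
+//    fmt.Print(str)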
+package protoprint
diff --git a/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
new file mode 100644
index 0000000..d8f7f22
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/desc/protoprint/print.go
@@ -0,0 +1,2288 @@
+package protoprint
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/desc/internal"
+	"github.com/jhump/protoreflect/dynamic"
+)
+
+// Printer knows how to format file descriptors as proto source code. Its fields
+// provide some control over how the resulting source file is constructed and
+// formatted.
+type Printer struct {
+	// If true, comments are rendered using "/*" style comments. Otherwise, they
+	// are printed using "//" style line comments.
+	PreferMultiLineStyleComments bool
+
+	// If true, elements are sorted into a canonical order.
+	//
+	// The canonical order for elements in a file is as follows:
+	//  1. Syntax
+	//  2. Package
+	//  3. Imports (sorted lexically)
+	//  4. Options (sorted by name, standard options before custom options)
+	//  5. Messages (sorted by name)
+	//  6. Enums (sorted by name)
+	//  7. Services (sorted by name)
+	//  8. Extensions (grouped by extendee, sorted by extendee+tag)
+	//
+	// The canonical order of elements in a message is as follows:
+	//  1. Options (sorted by name, standard options before custom options)
+	//  2. Fields and One-Ofs (sorted by tag; one-ofs interleaved based on the
+	//     minimum tag therein)
+	//  3. Nested Messages (sorted by name)
+	//  4. Nested Enums (sorted by name)
+	//  5. Extension ranges (sorted by starting tag number)
+	//  6. Nested Extensions (grouped by extendee, sorted by extendee+tag)
+	//  7. Reserved ranges (sorted by starting tag number)
+	//  8. Reserved names (sorted lexically)
+	//
+	// Methods are sorted within a service by name and appear after any service
+	// options (which are sorted by name, standard options before custom ones).
+	// Enum values are sorted within an enum, first by numeric value then by
+	// name, and also appear after any enum options.
+	//
+	// Options for fields, enum values, and extension ranges are sorted by name,
+	// standard options before custom ones.
+	SortElements bool
+
+	// The indentation used. Any characters other than spaces or tabs will be
+	// replaced with spaces. If unset/empty, two spaces will be used.
+	Indent string
+
+	// If true, detached comments (between elements) will be ignored.
+	//
+	// Deprecated: Use OmitComments bitmask instead.
+	OmitDetachedComments bool
+
+	// A bitmask of comment types to omit. If unset, all comments will be
+	// included. Use CommentsAll to not print any comments.
+	OmitComments CommentType
+
+	// If true, trailing comments that typically appear on the same line as an
+	// element (option, field, enum value, method) will be printed on a separate
+	// line instead.
+	//
+	// So, with this set, you'll get output like so:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1;
+	//    // trailing comment
+	//
+	// If left false, the printer will try to emit trailing comments on the same
+	// line instead:
+	//
+	//    // leading comment for field
+	//    repeated string names = 1; // trailing comment
+	//
+	// If the trailing comment has more than one line, it will automatically be
+	// forced to the next line. Also, elements that end with "}" instead of ";"
+	// will have trailing comments rendered on the subsequent line.
+	TrailingCommentsOnSeparateLine bool
+
+	// If true, the printed output will eschew any blank lines, which otherwise
+	// appear between descriptor elements and comment blocks. Note that if
+	// detached comments are being printed, this will cause them to be merged
+	// into the subsequent leading comments. Similarly, any trailing comments
+	// on an element will be merged into the subsequent leading comments.
+	Compact bool
+
+	// If true, all references to messages, extensions, and enums (such as in
+	// options, field types, and method request and response types) will be
+	// fully-qualified. When left unset, references will include only as much
+	// of the qualifier as is required.
+	//
+	// For example, if a message is in the same package as the reference, the
+	// simple name can be used. If a message shares some context with the
+	// reference, only the unshared context needs to be included. For example:
+	//
+	//  message Foo {
+	//    message Bar {
+	//      enum Baz {
+	//        ZERO = 0;
+	//        ONE = 1;
+	//      }
+	//    }
+	//
+	//    // This field shares some context with the enum it references: they are
+	//    // both inside of the namespace Foo:
+	//    //    field is "Foo.my_baz"
+	//    //     enum is "Foo.Bar.Baz"
+	//    // So we only need to qualify the reference with the context that they
+	//    // do NOT have in common:
+	//    Bar.Baz my_baz = 1;
+	//  }
+	//
+	// When printing fully-qualified names, they will be preceded by a dot, to
+	// avoid any ambiguity about whether they are relative or fully-qualified.
+	ForceFullyQualifiedNames bool
+}
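+
+// As the field docs above describe, a zero-value Printer uses sensible
+// defaults (e.g. a two-space indent). An illustrative configuration (an
+// editor's sketch, not part of the upstream source):
+//
+//    p := protoprint.Printer{
+//        SortElements: true, // canonical ordering of elements
+//        Indent:       "\t", // use tabs instead of the default two spaces
+//        Compact:      true, // no blank lines between elements
+//    }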
+
+// CommentType is a kind of comment in a proto source file. Its values can be
+// combined and used as a bitmask.
+type CommentType int
+
+const (
+	// CommentsDetached refers to comments that are not "attached" to any
+	// source element. They are attributed to the subsequent element in the
+	// file as "detached" comments.
+	CommentsDetached CommentType = 1 << iota
+	// CommentsTrailing refers to a comment block immediately following an
+	// element in the source file. If another element immediately follows
+	// the trailing comment, it is instead considered a leading comment for
+	// that subsequent element.
+	CommentsTrailing
+	// CommentsLeading refers to a comment block immediately preceding an
+	// element in the source file. For high-level elements (those that have
+	// their own descriptor), these are used as doc comments for that element.
+	CommentsLeading
+	// CommentsTokens refers to any comments (leading, trailing, or detached)
+	// on low-level elements in the file. "High-level" elements have their own
+	// descriptors, e.g. messages, enums, fields, services, and methods. But
+	// comments can appear anywhere (such as around identifiers and keywords,
+	// sprinkled inside the declarations of a high-level element). This class
+	// of comments is for those extra comments sprinkled into the file.
+	CommentsTokens
+
+	// CommentsNonDoc refers to comments that are *not* doc comments. This is a
+	// bitwise union of everything other than CommentsLeading. If you configure
+	// a printer to omit this, only doc comments on descriptor elements will be
+	// included in the printed output.
+	CommentsNonDoc = CommentsDetached | CommentsTrailing | CommentsTokens
+	// CommentsAll indicates all kinds of comments. If you configure a printer
+	// to omit this, no comments will appear in the printed output, even if the
+	// input descriptors had source info and comments.
+	CommentsAll = -1
+)
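+
+// The CommentType values above are bit flags, so they can be combined with
+// bitwise OR when configuring Printer.OmitComments. For example (an editor's
+// illustration, not part of the upstream source):
+//
+//    // keep only doc comments on descriptor elements
+//    p := protoprint.Printer{OmitComments: protoprint.CommentsNonDoc}
+//
+//    // strip all comments from the output
+//    p = protoprint.Printer{OmitComments: protoprint.CommentsAll}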
+
+// PrintProtoFiles prints all of the given file descriptors. The given open
+// function is called with a file name and is responsible for creating the
+// output and returning the corresponding writer.
+func (p *Printer) PrintProtoFiles(fds []*desc.FileDescriptor, open func(name string) (io.WriteCloser, error)) error {
+	for _, fd := range fds {
+		w, err := open(fd.GetName())
+		if err != nil {
+			return fmt.Errorf("failed to open %s: %v", fd.GetName(), err)
+		}
+		err = func() error {
+			defer w.Close()
+			return p.PrintProtoFile(fd, w)
+		}()
+		if err != nil {
+			return fmt.Errorf("failed to write %s: %v", fd.GetName(), err)
+		}
+	}
+	return nil
+}
+
+// PrintProtosToFileSystem prints all of the given file descriptors to files in
+// the given directory. If file names in the given descriptors include path
+// information, they will be relative to the given root.
+func (p *Printer) PrintProtosToFileSystem(fds []*desc.FileDescriptor, rootDir string) error {
+	return p.PrintProtoFiles(fds, func(name string) (io.WriteCloser, error) {
+		fullPath := filepath.Join(rootDir, name)
+		dir := filepath.Dir(fullPath)
+		if err := os.MkdirAll(dir, os.ModePerm); err != nil {
+			return nil, err
+		}
+		return os.OpenFile(fullPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+	})
+}
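+
+// For illustration (an editor's addition, not in the upstream source), writing
+// every descriptor in fds to files under the directory "out" looks like:
+//
+//    if err := p.PrintProtosToFileSystem(fds, "out"); err != nil {
+//        // handle error
+//    }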
+
+// pkg represents a package name
+type pkg string
+
+// imp represents an imported file name
+type imp string
+
+// ident represents an identifier
+type ident string
+
+// option represents a resolved descriptor option
+type option struct {
+	name string
+	val  interface{}
+}
+
+// reservedRange represents a reserved range from a message or enum
+type reservedRange struct {
+	start, end int32
+}
+
+// PrintProtoFile prints the given single file descriptor to the given writer.
+func (p *Printer) PrintProtoFile(fd *desc.FileDescriptor, out io.Writer) error {
+	return p.printProto(fd, out)
+}
+
+// PrintProtoToString prints the given descriptor and returns the resulting
+// string. This can be used to print proto files, but it can also be used to
+// get the proto "source form" for any kind of descriptor, which can be a more
+// user-friendly way to present descriptors that are intended for human
+// consumption.
+func (p *Printer) PrintProtoToString(dsc desc.Descriptor) (string, error) {
+	var buf bytes.Buffer
+	if err := p.printProto(dsc, &buf); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
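+
+// As an editor's illustration (not part of the upstream source), the "source
+// form" of a single message descriptor can be obtained from a generated
+// message value like so, assuming msg is a proto.Message:
+//
+//    md, err := desc.LoadMessageDescriptorForMessage(msg)
+//    if err != nil {
+//        // handle error
+//    }
+//    text, err := p.PrintProtoToString(md)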
+
+func (p *Printer) printProto(dsc desc.Descriptor, out io.Writer) error {
+	w := newWriter(out)
+
+	if p.Indent == "" {
+		// default indent to two spaces
+		p.Indent = "  "
+	} else {
+		// indent must be all spaces or tabs, so convert other chars to spaces
+		ind := make([]rune, 0, len(p.Indent))
+		for _, r := range p.Indent {
+			if r == '\t' {
+				ind = append(ind, r)
+			} else {
+				ind = append(ind, ' ')
+			}
+		}
+		p.Indent = string(ind)
+	}
+	if p.OmitDetachedComments {
+		p.OmitComments |= CommentsDetached
+	}
+
+	er := dynamic.ExtensionRegistry{}
+	er.AddExtensionsFromFileRecursively(dsc.GetFile())
+	mf := dynamic.NewMessageFactoryWithExtensionRegistry(&er)
+	fdp := dsc.GetFile().AsFileDescriptorProto()
+	sourceInfo := internal.CreateSourceInfoMap(fdp)
+	extendOptionLocations(sourceInfo)
+
+	path := findElement(dsc)
+	switch d := dsc.(type) {
+	case *desc.FileDescriptor:
+		p.printFile(d, mf, w, sourceInfo)
+	case *desc.MessageDescriptor:
+		p.printMessage(d, mf, w, sourceInfo, path, 0)
+	case *desc.FieldDescriptor:
+		var scope string
+		if md, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			scope = md.GetFullyQualifiedName()
+		} else {
+			scope = d.GetFile().GetPackage()
+		}
+		if d.IsExtension() {
+			fmt.Fprint(w, "extend ")
+			extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
+			p.printElementString(extNameSi, w, 0, p.qualifyName(d.GetFile().GetPackage(), scope, d.GetOwner().GetFullyQualifiedName()))
+			fmt.Fprintln(w, "{")
+
+			p.printField(d, mf, w, sourceInfo, path, scope, 1)
+
+			fmt.Fprintln(w, "}")
+		} else {
+			p.printField(d, mf, w, sourceInfo, path, scope, 0)
+		}
+	case *desc.OneOfDescriptor:
+		md := d.GetOwner()
+		elements := elementAddrs{dsc: md}
+		for i := range md.GetFields() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+		}
+		p.printOneOf(d, elements, 0, mf, w, sourceInfo, path[:len(path)-1], 0, path[len(path)-1])
+	case *desc.EnumDescriptor:
+		p.printEnum(d, mf, w, sourceInfo, path, 0)
+	case *desc.EnumValueDescriptor:
+		p.printEnumValue(d, mf, w, sourceInfo, path, 0)
+	case *desc.ServiceDescriptor:
+		p.printService(d, mf, w, sourceInfo, path, 0)
+	case *desc.MethodDescriptor:
+		p.printMethod(d, mf, w, sourceInfo, path, 0)
+	}
+
+	return w.err
+}
+
+func findElement(dsc desc.Descriptor) []int32 {
+	if dsc.GetParent() == nil {
+		return nil
+	}
+	path := findElement(dsc.GetParent())
+	switch d := dsc.(type) {
+	case *desc.MessageDescriptor:
+		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			return append(path, internal.Message_nestedMessagesTag, getMessageIndex(d, pm.GetNestedMessageTypes()))
+		}
+		return append(path, internal.File_messagesTag, getMessageIndex(d, d.GetFile().GetMessageTypes()))
+
+	case *desc.FieldDescriptor:
+		if d.IsExtension() {
+			if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+				return append(path, internal.Message_extensionsTag, getFieldIndex(d, pm.GetNestedExtensions()))
+			}
+			return append(path, internal.File_extensionsTag, getFieldIndex(d, d.GetFile().GetExtensions()))
+		}
+		return append(path, internal.Message_fieldsTag, getFieldIndex(d, d.GetOwner().GetFields()))
+
+	case *desc.OneOfDescriptor:
+		return append(path, internal.Message_oneOfsTag, getOneOfIndex(d, d.GetOwner().GetOneOfs()))
+
+	case *desc.EnumDescriptor:
+		if pm, ok := d.GetParent().(*desc.MessageDescriptor); ok {
+			return append(path, internal.Message_enumsTag, getEnumIndex(d, pm.GetNestedEnumTypes()))
+		}
+		return append(path, internal.File_enumsTag, getEnumIndex(d, d.GetFile().GetEnumTypes()))
+
+	case *desc.EnumValueDescriptor:
+		return append(path, internal.Enum_valuesTag, getEnumValueIndex(d, d.GetEnum().GetValues()))
+
+	case *desc.ServiceDescriptor:
+		return append(path, internal.File_servicesTag, getServiceIndex(d, d.GetFile().GetServices()))
+
+	case *desc.MethodDescriptor:
+		return append(path, internal.Service_methodsTag, getMethodIndex(d, d.GetService().GetMethods()))
+
+	default:
+		panic(fmt.Sprintf("unexpected descriptor type: %T", dsc))
+	}
+}
+
+func getMessageIndex(md *desc.MessageDescriptor, list []*desc.MessageDescriptor) int32 {
+	for i := range list {
+		if md == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of message %s", md.GetFullyQualifiedName()))
+}
+
+func getFieldIndex(fd *desc.FieldDescriptor, list []*desc.FieldDescriptor) int32 {
+	for i := range list {
+		if fd == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of field %s", fd.GetFullyQualifiedName()))
+}
+
+func getOneOfIndex(ood *desc.OneOfDescriptor, list []*desc.OneOfDescriptor) int32 {
+	for i := range list {
+		if ood == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of oneof %s", ood.GetFullyQualifiedName()))
+}
+
+func getEnumIndex(ed *desc.EnumDescriptor, list []*desc.EnumDescriptor) int32 {
+	for i := range list {
+		if ed == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of enum %s", ed.GetFullyQualifiedName()))
+}
+
+func getEnumValueIndex(evd *desc.EnumValueDescriptor, list []*desc.EnumValueDescriptor) int32 {
+	for i := range list {
+		if evd == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of enum value %s", evd.GetFullyQualifiedName()))
+}
+
+func getServiceIndex(sd *desc.ServiceDescriptor, list []*desc.ServiceDescriptor) int32 {
+	for i := range list {
+		if sd == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of service %s", sd.GetFullyQualifiedName()))
+}
+
+func getMethodIndex(mtd *desc.MethodDescriptor, list []*desc.MethodDescriptor) int32 {
+	for i := range list {
+		if mtd == list[i] {
+			return int32(i)
+		}
+	}
+	panic(fmt.Sprintf("unable to determine index of method %s", mtd.GetFullyQualifiedName()))
+}
+
+func (p *Printer) newLine(w io.Writer) {
+	if !p.Compact {
+		fmt.Fprintln(w)
+	}
+}
+
+func (p *Printer) printFile(fd *desc.FileDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap) {
+	opts, err := p.extractOptions(fd, fd.GetOptions(), mf)
+	if err != nil {
+		return
+	}
+
+	fdp := fd.AsFileDescriptorProto()
+	path := make([]int32, 1)
+
+	path[0] = internal.File_packageTag
+	sourceInfo.PutIfAbsent(append(path, 0), sourceInfo.Get(path))
+
+	path[0] = internal.File_syntaxTag
+	si := sourceInfo.Get(path)
+	p.printElement(false, si, w, 0, func(w *writer) {
+		syn := fdp.GetSyntax()
+		if syn == "" {
+			syn = "proto2"
+		}
+		fmt.Fprintf(w, "syntax = %q;", syn)
+	})
+	p.newLine(w)
+
+	elements := elementAddrs{dsc: fd, opts: opts}
+	if fdp.Package != nil {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_packageTag, elementIndex: 0, order: -3})
+	}
+	for i := range fd.AsFileDescriptorProto().GetDependency() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_dependencyTag, elementIndex: i, order: -2})
+	}
+	elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.File_optionsTag, -1, opts)...)
+	for i := range fd.GetMessageTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_messagesTag, elementIndex: i})
+	}
+	for i := range fd.GetEnumTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_enumsTag, elementIndex: i})
+	}
+	for i := range fd.GetServices() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_servicesTag, elementIndex: i})
+	}
+	for i := range fd.GetExtensions() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.File_extensionsTag, elementIndex: i})
+	}
+
+	p.sort(elements, sourceInfo, nil)
+
+	pkgName := fd.GetPackage()
+
+	var ext *desc.FieldDescriptor
+	for i, el := range elements.addrs {
+		d := elements.at(el)
+		path = []int32{el.elementType, int32(el.elementIndex)}
+		if el.elementType == internal.File_extensionsTag {
+			fld := d.(*desc.FieldDescriptor)
+			if ext == nil || ext.GetOwner() != fld.GetOwner() {
+				// need to open a new extend block
+				if ext != nil {
+					// close preceding extend block
+					fmt.Fprintln(w, "}")
+				}
+				if i > 0 {
+					p.newLine(w)
+				}
+
+				ext = fld
+				fmt.Fprint(w, "extend ")
+				extNameSi := sourceInfo.Get(append(path, internal.Field_extendeeTag))
+				p.printElementString(extNameSi, w, 0, p.qualifyName(pkgName, pkgName, fld.GetOwner().GetFullyQualifiedName()))
+				fmt.Fprintln(w, "{")
+			} else {
+				p.newLine(w)
+			}
+			p.printField(fld, mf, w, sourceInfo, path, pkgName, 1)
+		} else {
+			if ext != nil {
+				// close preceding extend block
+				fmt.Fprintln(w, "}")
+				ext = nil
+			}
+
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			switch d := d.(type) {
+			case pkg:
+				si := sourceInfo.Get(path)
+				p.printElement(false, si, w, 0, func(w *writer) {
+					fmt.Fprintf(w, "package %s;", d)
+				})
+			case imp:
+				si := sourceInfo.Get(path)
+				p.printElement(false, si, w, 0, func(w *writer) {
+					fmt.Fprintf(w, "import %q;", d)
+				})
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, path, 0)
+			case *desc.MessageDescriptor:
+				p.printMessage(d, mf, w, sourceInfo, path, 0)
+			case *desc.EnumDescriptor:
+				p.printEnum(d, mf, w, sourceInfo, path, 0)
+			case *desc.ServiceDescriptor:
+				p.printService(d, mf, w, sourceInfo, path, 0)
+			}
+		}
+	}
+
+	if ext != nil {
+		// close trailing extend block
+		fmt.Fprintln(w, "}")
+	}
+}
+
+func (p *Printer) sort(elements elementAddrs, sourceInfo internal.SourceInfoMap, path []int32) {
+	if p.SortElements {
+		// canonical sorted order
+		sort.Stable(elements)
+	} else {
+		// use source order (per location information in SourceCodeInfo); or
+		// if that isn't present use declaration order, but grouped by type
+		sort.Stable(elementSrcOrder{
+			elementAddrs: elements,
+			sourceInfo:   sourceInfo,
+			prefix:       path,
+		})
+	}
+}
+
+func (p *Printer) qualifyName(pkg, scope string, fqn string) string {
+	if p.ForceFullyQualifiedNames {
+		// forcing fully-qualified names; make sure to include preceding dot
+		if fqn[0] == '.' {
+			return fqn
+		}
+		return fmt.Sprintf(".%s", fqn)
+	}
+
+	// compute relative name (so no leading dot)
+	if fqn[0] == '.' {
+		fqn = fqn[1:]
+	}
+	if len(scope) > 0 && scope[len(scope)-1] != '.' {
+		scope = scope + "."
+	}
+	for scope != "" {
+		if strings.HasPrefix(fqn, scope) {
+			return fqn[len(scope):]
+		}
+		if scope == pkg+"." {
+			break
+		}
+		pos := strings.LastIndex(scope[:len(scope)-1], ".")
+		scope = scope[:pos+1]
+	}
+	return fqn
+}
+
+func (p *Printer) typeString(fld *desc.FieldDescriptor, scope string) string {
+	if fld.IsMap() {
+		return fmt.Sprintf("map<%s, %s>", p.typeString(fld.GetMapKeyType(), scope), p.typeString(fld.GetMapValueType(), scope))
+	}
+	switch fld.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		return "int32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		return "int64"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		return "uint32"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return "uint64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		return "sint32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return "sint64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		return "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		return "sfixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return "sfixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return "float"
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return "double"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "bool"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return "string"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetEnumType().GetFullyQualifiedName())
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return p.qualifyName(fld.GetFile().GetPackage(), scope, fld.GetMessageType().GetFullyQualifiedName())
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return fld.GetMessageType().GetName()
+	}
+	panic(fmt.Sprintf("invalid type: %v", fld.GetType()))
+}
+
+func (p *Printer) printMessage(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		fmt.Fprint(w, "message ")
+		nameSi := sourceInfo.Get(append(path, internal.Message_nameTag))
+		p.printElementString(nameSi, w, indent, md.GetName())
+		fmt.Fprintln(w, "{")
+
+		p.printMessageBody(md, mf, w, sourceInfo, path, indent+1)
+		p.indent(w, indent)
+		fmt.Fprintln(w, "}")
+	})
+}
+
+func (p *Printer) printMessageBody(md *desc.MessageDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	opts, err := p.extractOptions(md, md.GetOptions(), mf)
+	if err != nil {
+		if w.err == nil {
+			w.err = err
+		}
+		return
+	}
+
+	skip := map[interface{}]bool{}
+
+	elements := elementAddrs{dsc: md, opts: opts}
+	elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Message_optionsTag, -1, opts)...)
+	for i := range md.AsDescriptorProto().GetReservedRange() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedRangeTag, elementIndex: i})
+	}
+	for i := range md.AsDescriptorProto().GetReservedName() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_reservedNameTag, elementIndex: i})
+	}
+	for i := range md.AsDescriptorProto().GetExtensionRange() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionRangeTag, elementIndex: i})
+	}
+	for i, fld := range md.GetFields() {
+		if fld.IsMap() || fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP {
+			// we don't emit nested messages for map types or groups since
+			// they get special treatment
+			skip[fld.GetMessageType()] = true
+		}
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_fieldsTag, elementIndex: i})
+	}
+	for i := range md.GetNestedMessageTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_nestedMessagesTag, elementIndex: i})
+	}
+	for i := range md.GetNestedEnumTypes() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_enumsTag, elementIndex: i})
+	}
+	for i := range md.GetNestedExtensions() {
+		elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Message_extensionsTag, elementIndex: i})
+	}
+
+	p.sort(elements, sourceInfo, path)
+
+	pkg := md.GetFile().GetPackage()
+	scope := md.GetFullyQualifiedName()
+
+	var ext *desc.FieldDescriptor
+	for i, el := range elements.addrs {
+		d := elements.at(el)
+		// skip[d] will panic if d is a slice (which it could be for []option),
+		// so just ignore it since we don't try to skip options
+		if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+			// skip this element
+			continue
+		}
+
+		childPath := append(path, el.elementType, int32(el.elementIndex))
+		if el.elementType == internal.Message_extensionsTag {
+			// extension
+			fld := d.(*desc.FieldDescriptor)
+			if ext == nil || ext.GetOwner() != fld.GetOwner() {
+				// need to open a new extend block
+				if ext != nil {
+					// close preceding extend block
+					p.indent(w, indent)
+					fmt.Fprintln(w, "}")
+				}
+				if i > 0 {
+					p.newLine(w)
+				}
+
+				ext = fld
+				p.indent(w, indent)
+				fmt.Fprint(w, "extend ")
+				extNameSi := sourceInfo.Get(append(childPath, internal.Field_extendeeTag))
+				p.printElementString(extNameSi, w, indent, p.qualifyName(pkg, scope, fld.GetOwner().GetFullyQualifiedName()))
+				fmt.Fprintln(w, "{")
+			} else {
+				p.newLine(w)
+			}
+			p.printField(fld, mf, w, sourceInfo, childPath, scope, indent+1)
+		} else {
+			if ext != nil {
+				// close preceding extend block
+				p.indent(w, indent)
+				fmt.Fprintln(w, "}")
+				ext = nil
+			}
+
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			switch d := d.(type) {
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
+			case *desc.FieldDescriptor:
+				ood := d.GetOneOf()
+				if ood == nil {
+					p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
+				} else if !skip[ood] {
+					// print the one-of, including all of its fields
+					p.printOneOf(ood, elements, i, mf, w, sourceInfo, path, indent, d.AsFieldDescriptorProto().GetOneofIndex())
+					skip[ood] = true
+				}
+			case *desc.MessageDescriptor:
+				p.printMessage(d, mf, w, sourceInfo, childPath, indent)
+			case *desc.EnumDescriptor:
+				p.printEnum(d, mf, w, sourceInfo, childPath, indent)
+			case *descriptor.DescriptorProto_ExtensionRange:
+				// collapse ranges into a single "extensions" block
+				ranges := []*descriptor.DescriptorProto_ExtensionRange{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					extr := elements.at(elnext).(*descriptor.DescriptorProto_ExtensionRange)
+					if !areEqual(d.Options, extr.Options, mf) {
+						break
+					}
+					ranges = append(ranges, extr)
+					addrs = append(addrs, elnext)
+					skip[extr] = true
+				}
+				p.printExtensionRanges(md, ranges, addrs, mf, w, sourceInfo, path, indent)
+			case reservedRange:
+				// collapse reserved ranges into a single "reserved" block
+				ranges := []reservedRange{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rr := elements.at(elnext).(reservedRange)
+					ranges = append(ranges, rr)
+					addrs = append(addrs, elnext)
+					skip[rr] = true
+				}
+				p.printReservedRanges(ranges, false, addrs, w, sourceInfo, path, indent)
+			case string: // reserved name
+				// collapse reserved names into a single "reserved" block
+				names := []string{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rn := elements.at(elnext).(string)
+					names = append(names, rn)
+					addrs = append(addrs, elnext)
+					skip[rn] = true
+				}
+				p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
+			}
+		}
+	}
+
+	if ext != nil {
+		// close trailing extend block
+		p.indent(w, indent)
+		fmt.Fprintln(w, "}")
+	}
+}
+
+func areEqual(a, b proto.Message, mf *dynamic.MessageFactory) bool {
+	// proto.Equal doesn't handle unknown extensions very well :(
+	// so we convert to a dynamic message (which should know about all extensions via
+	// extension registry) and then compare
+	return dynamic.MessagesEqual(asDynamicIfPossible(a, mf), asDynamicIfPossible(b, mf))
+}
+
+func asDynamicIfPossible(msg proto.Message, mf *dynamic.MessageFactory) proto.Message {
+	if dm, ok := msg.(*dynamic.Message); ok {
+		return dm
+	} else {
+		md, err := desc.LoadMessageDescriptorForMessage(msg)
+		if err == nil {
+			dm := mf.NewDynamicMessage(md)
+			if dm.ConvertFrom(msg) == nil {
+				return dm
+			}
+		}
+	}
+	return msg
+}
+
+func (p *Printer) printField(fld *desc.FieldDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, scope string, indent int) {
+	var groupPath []int32
+	var si *descriptor.SourceCodeInfo_Location
+	if isGroup(fld) {
+		// compute path to group message type
+		groupPath = make([]int32, len(path)-2)
+		copy(groupPath, path)
+		var groupMsgIndex int32
+		md := fld.GetParent().(*desc.MessageDescriptor)
+		for i, nmd := range md.GetNestedMessageTypes() {
+			if nmd == fld.GetMessageType() {
+				// found it
+				groupMsgIndex = int32(i)
+				break
+			}
+		}
+		groupPath = append(groupPath, internal.Message_nestedMessagesTag, groupMsgIndex)
+
+		// the group message is where the field's comments and position are stored
+		si = sourceInfo.Get(groupPath)
+	} else {
+		si = sourceInfo.Get(path)
+	}
+
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+		if shouldEmitLabel(fld) {
+			locSi := sourceInfo.Get(append(path, internal.Field_labelTag))
+			p.printElementString(locSi, w, indent, labelString(fld.GetLabel()))
+		}
+
+		if isGroup(fld) {
+			fmt.Fprint(w, "group ")
+
+			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
+			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))
+			fmt.Fprint(w, "= ")
+
+			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
+			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))
+
+			fmt.Fprintln(w, "{")
+			p.printMessageBody(fld.GetMessageType(), mf, w, sourceInfo, groupPath, indent+1)
+
+			p.indent(w, indent)
+			fmt.Fprintln(w, "}")
+		} else {
+			typeSi := sourceInfo.Get(append(path, internal.Field_typeTag))
+			p.printElementString(typeSi, w, indent, p.typeString(fld, scope))
+
+			nameSi := sourceInfo.Get(append(path, internal.Field_nameTag))
+			p.printElementString(nameSi, w, indent, fld.GetName())
+			fmt.Fprint(w, "= ")
+
+			numSi := sourceInfo.Get(append(path, internal.Field_numberTag))
+			p.printElementString(numSi, w, indent, fmt.Sprintf("%d", fld.GetNumber()))
+
+			opts, err := p.extractOptions(fld, fld.GetOptions(), mf)
+			if err != nil {
+				if w.err == nil {
+					w.err = err
+				}
+				return
+			}
+
+			// we use negative values for "extras" keys so they can't collide
+			// with legit option tags
+
+			if !fld.GetFile().IsProto3() && fld.AsFieldDescriptorProto().DefaultValue != nil {
+				defVal := fld.GetDefaultValue()
+				if fld.GetEnumType() != nil {
+					defVal = fld.GetEnumType().FindValueByNumber(defVal.(int32))
+				}
+				opts[-internal.Field_defaultTag] = []option{{name: "default", val: defVal}}
+			}
+
+			jsn := fld.AsFieldDescriptorProto().GetJsonName()
+			if jsn != "" && jsn != internal.JsonName(fld.GetName()) {
+				opts[-internal.Field_jsonNameTag] = []option{{name: "json_name", val: jsn}}
+			}
+
+			elements := elementAddrs{dsc: fld, opts: opts}
+			elements.addrs = optionsAsElementAddrs(internal.Field_optionsTag, 0, opts)
+			p.sort(elements, sourceInfo, path)
+			p.printOptionElementsShort(elements, w, sourceInfo, path, indent)
+
+			fmt.Fprint(w, ";")
+		}
+	})
+}
+
+func shouldEmitLabel(fld *desc.FieldDescriptor) bool {
+	return !fld.IsMap() && fld.GetOneOf() == nil && (fld.GetLabel() != descriptor.FieldDescriptorProto_LABEL_OPTIONAL || !fld.GetFile().IsProto3())
+}
+
+func labelString(lbl descriptor.FieldDescriptorProto_Label) string {
+	switch lbl {
+	case descriptor.FieldDescriptorProto_LABEL_OPTIONAL:
+		return "optional"
+	case descriptor.FieldDescriptorProto_LABEL_REQUIRED:
+		return "required"
+	case descriptor.FieldDescriptorProto_LABEL_REPEATED:
+		return "repeated"
+	}
+	panic(fmt.Sprintf("invalid label: %v", lbl))
+}
+
+func isGroup(fld *desc.FieldDescriptor) bool {
+	return fld.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+}
+
+func (p *Printer) printOneOf(ood *desc.OneOfDescriptor, parentElements elementAddrs, startFieldIndex int, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int, ooIndex int32) {
+	oopath := append(parentPath, internal.Message_oneOfsTag, ooIndex)
+	oosi := sourceInfo.Get(oopath)
+	p.printElement(true, oosi, w, indent, func(w *writer) {
+		p.indent(w, indent)
+		fmt.Fprint(w, "oneof ")
+		extNameSi := sourceInfo.Get(append(oopath, internal.OneOf_nameTag))
+		p.printElementString(extNameSi, w, indent, ood.GetName())
+		fmt.Fprintln(w, "{")
+
+		indent++
+		opts, err := p.extractOptions(ood, ood.GetOptions(), mf)
+		if err != nil {
+			if w.err == nil {
+				w.err = err
+			}
+			return
+		}
+
+		elements := elementAddrs{dsc: ood, opts: opts}
+		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.OneOf_optionsTag, -1, opts)...)
+
+		count := len(ood.GetChoices())
+		for idx := startFieldIndex; count > 0 && idx < len(parentElements.addrs); idx++ {
+			el := parentElements.addrs[idx]
+			if el.elementType != internal.Message_fieldsTag {
+				continue
+			}
+			if parentElements.at(el).(*desc.FieldDescriptor).GetOneOf() == ood {
+				// negative tag indicates that this element is actually a sibling, not a child
+				elements.addrs = append(elements.addrs, elementAddr{elementType: -internal.Message_fieldsTag, elementIndex: el.elementIndex})
+				count--
+			}
+		}
+
+		p.sort(elements, sourceInfo, oopath)
+
+		scope := ood.GetOwner().GetFullyQualifiedName()
+
+		for i, el := range elements.addrs {
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			switch d := elements.at(el).(type) {
+			case []option:
+				childPath := append(oopath, el.elementType, int32(el.elementIndex))
+				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
+			case *desc.FieldDescriptor:
+				childPath := append(parentPath, -el.elementType, int32(el.elementIndex))
+				p.printField(d, mf, w, sourceInfo, childPath, scope, indent)
+			}
+		}
+
+		p.indent(w, indent-1)
+		fmt.Fprintln(w, "}")
+	})
+}
+
+func (p *Printer) printExtensionRanges(parent *desc.MessageDescriptor, ranges []*descriptor.DescriptorProto_ExtensionRange, addrs []elementAddr, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+	p.indent(w, indent)
+	fmt.Fprint(w, "extensions ")
+
+	var opts *descriptor.ExtensionRangeOptions
+	var elPath []int32
+	first := true
+	for i, extr := range ranges {
+		if first {
+			first = false
+		} else {
+			fmt.Fprint(w, ", ")
+		}
+		opts = extr.Options
+		el := addrs[i]
+		elPath = append(parentPath, el.elementType, int32(el.elementIndex))
+		si := sourceInfo.Get(elPath)
+		p.printElement(true, si, w, inline(indent), func(w *writer) {
+			if extr.GetStart() == extr.GetEnd()-1 {
+				fmt.Fprintf(w, "%d ", extr.GetStart())
+			} else if extr.GetEnd()-1 == internal.MaxTag {
+				fmt.Fprintf(w, "%d to max ", extr.GetStart())
+			} else {
+				fmt.Fprintf(w, "%d to %d ", extr.GetStart(), extr.GetEnd()-1)
+			}
+		})
+	}
+	dsc := extensionRange{owner: parent, extRange: ranges[0]}
+	p.printOptionsShort(dsc, opts, mf, internal.ExtensionRange_optionsTag, w, sourceInfo, elPath, indent)
+
+	fmt.Fprintln(w, ";")
+}
+
+func (p *Printer) printReservedRanges(ranges []reservedRange, isEnum bool, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+	p.indent(w, indent)
+	fmt.Fprint(w, "reserved ")
+
+	first := true
+	for i, rr := range ranges {
+		if first {
+			first = false
+		} else {
+			fmt.Fprint(w, ", ")
+		}
+		el := addrs[i]
+		si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
+		p.printElement(false, si, w, inline(indent), func(w *writer) {
+			if rr.start == rr.end {
+				fmt.Fprintf(w, "%d ", rr.start)
+			} else if (rr.end == internal.MaxTag && !isEnum) ||
+				(rr.end == math.MaxInt32 && isEnum) {
+				fmt.Fprintf(w, "%d to max ", rr.start)
+			} else {
+				fmt.Fprintf(w, "%d to %d ", rr.start, rr.end)
+			}
+		})
+	}
+
+	fmt.Fprintln(w, ";")
+}
+
+func (p *Printer) printReservedNames(names []string, addrs []elementAddr, w *writer, sourceInfo internal.SourceInfoMap, parentPath []int32, indent int) {
+	p.indent(w, indent)
+	fmt.Fprint(w, "reserved ")
+
+	first := true
+	for i, name := range names {
+		if first {
+			first = false
+		} else {
+			fmt.Fprint(w, ", ")
+		}
+		el := addrs[i]
+		si := sourceInfo.Get(append(parentPath, el.elementType, int32(el.elementIndex)))
+		p.printElementString(si, w, indent, quotedString(name))
+	}
+
+	fmt.Fprintln(w, ";")
+}
+
+func (p *Printer) printEnum(ed *desc.EnumDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		fmt.Fprint(w, "enum ")
+		nameSi := sourceInfo.Get(append(path, internal.Enum_nameTag))
+		p.printElementString(nameSi, w, indent, ed.GetName())
+		fmt.Fprintln(w, "{")
+
+		indent++
+		opts, err := p.extractOptions(ed, ed.GetOptions(), mf)
+		if err != nil {
+			if w.err == nil {
+				w.err = err
+			}
+			return
+		}
+
+		skip := map[interface{}]bool{}
+
+		elements := elementAddrs{dsc: ed, opts: opts}
+		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Enum_optionsTag, -1, opts)...)
+		for i := range ed.GetValues() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_valuesTag, elementIndex: i})
+		}
+		for i := range ed.AsEnumDescriptorProto().GetReservedRange() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedRangeTag, elementIndex: i})
+		}
+		for i := range ed.AsEnumDescriptorProto().GetReservedName() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Enum_reservedNameTag, elementIndex: i})
+		}
+
+		p.sort(elements, sourceInfo, path)
+
+		for i, el := range elements.addrs {
+			d := elements.at(el)
+
+			// skip[d] will panic if d is a slice (which it could be for []option),
+			// so just ignore it since we don't try to skip options
+			if reflect.TypeOf(d).Kind() != reflect.Slice && skip[d] {
+				// skip this element
+				continue
+			}
+
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			childPath := append(path, el.elementType, int32(el.elementIndex))
+
+			switch d := d.(type) {
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
+			case *desc.EnumValueDescriptor:
+				p.printEnumValue(d, mf, w, sourceInfo, childPath, indent)
+			case reservedRange:
+				// collapse reserved ranges into a single "reserved" block
+				ranges := []reservedRange{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rr := elements.at(elnext).(reservedRange)
+					ranges = append(ranges, rr)
+					addrs = append(addrs, elnext)
+					skip[rr] = true
+				}
+				p.printReservedRanges(ranges, true, addrs, w, sourceInfo, path, indent)
+			case string: // reserved name
+				// collapse reserved names into a single "reserved" block
+				names := []string{d}
+				addrs := []elementAddr{el}
+				for idx := i + 1; idx < len(elements.addrs); idx++ {
+					elnext := elements.addrs[idx]
+					if elnext.elementType != el.elementType {
+						break
+					}
+					rn := elements.at(elnext).(string)
+					names = append(names, rn)
+					addrs = append(addrs, elnext)
+					skip[rn] = true
+				}
+				p.printReservedNames(names, addrs, w, sourceInfo, path, indent)
+			}
+		}
+
+		p.indent(w, indent-1)
+		fmt.Fprintln(w, "}")
+	})
+}
+
+func (p *Printer) printEnumValue(evd *desc.EnumValueDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		nameSi := sourceInfo.Get(append(path, internal.EnumVal_nameTag))
+		p.printElementString(nameSi, w, indent, evd.GetName())
+		fmt.Fprint(w, "= ")
+
+		numSi := sourceInfo.Get(append(path, internal.EnumVal_numberTag))
+		p.printElementString(numSi, w, indent, fmt.Sprintf("%d", evd.GetNumber()))
+
+		p.printOptionsShort(evd, evd.GetOptions(), mf, internal.EnumVal_optionsTag, w, sourceInfo, path, indent)
+
+		fmt.Fprint(w, ";")
+	})
+}
+
+func (p *Printer) printService(sd *desc.ServiceDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		fmt.Fprint(w, "service ")
+		nameSi := sourceInfo.Get(append(path, internal.Service_nameTag))
+		p.printElementString(nameSi, w, indent, sd.GetName())
+		fmt.Fprintln(w, "{")
+
+		indent++
+
+		opts, err := p.extractOptions(sd, sd.GetOptions(), mf)
+		if err != nil {
+			if w.err == nil {
+				w.err = err
+			}
+			return
+		}
+
+		elements := elementAddrs{dsc: sd, opts: opts}
+		elements.addrs = append(elements.addrs, optionsAsElementAddrs(internal.Service_optionsTag, -1, opts)...)
+		for i := range sd.GetMethods() {
+			elements.addrs = append(elements.addrs, elementAddr{elementType: internal.Service_methodsTag, elementIndex: i})
+		}
+
+		p.sort(elements, sourceInfo, path)
+
+		for i, el := range elements.addrs {
+			if i > 0 {
+				p.newLine(w)
+			}
+
+			childPath := append(path, el.elementType, int32(el.elementIndex))
+
+			switch d := elements.at(el).(type) {
+			case []option:
+				p.printOptionsLong(d, w, sourceInfo, childPath, indent)
+			case *desc.MethodDescriptor:
+				p.printMethod(d, mf, w, sourceInfo, childPath, indent)
+			}
+		}
+
+		p.indent(w, indent-1)
+		fmt.Fprintln(w, "}")
+	})
+}
+
+func (p *Printer) printMethod(mtd *desc.MethodDescriptor, mf *dynamic.MessageFactory, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	si := sourceInfo.Get(path)
+	pkg := mtd.GetFile().GetPackage()
+	p.printElement(true, si, w, indent, func(w *writer) {
+		p.indent(w, indent)
+
+		fmt.Fprint(w, "rpc ")
+		nameSi := sourceInfo.Get(append(path, internal.Method_nameTag))
+		p.printElementString(nameSi, w, indent, mtd.GetName())
+
+		fmt.Fprint(w, "( ")
+		inSi := sourceInfo.Get(append(path, internal.Method_inputTag))
+		inName := p.qualifyName(pkg, pkg, mtd.GetInputType().GetFullyQualifiedName())
+		if mtd.IsClientStreaming() {
+			inName = "stream " + inName
+		}
+		p.printElementString(inSi, w, indent, inName)
+
+		fmt.Fprint(w, ") returns ( ")
+
+		outSi := sourceInfo.Get(append(path, internal.Method_outputTag))
+		outName := p.qualifyName(pkg, pkg, mtd.GetOutputType().GetFullyQualifiedName())
+		if mtd.IsServerStreaming() {
+			outName = "stream " + outName
+		}
+		p.printElementString(outSi, w, indent, outName)
+		fmt.Fprint(w, ") ")
+
+		opts, err := p.extractOptions(mtd, mtd.GetOptions(), mf)
+		if err != nil {
+			if w.err == nil {
+				w.err = err
+			}
+			return
+		}
+
+		if len(opts) > 0 {
+			fmt.Fprintln(w, "{")
+			indent++
+
+			elements := elementAddrs{dsc: mtd, opts: opts}
+			elements.addrs = optionsAsElementAddrs(internal.Method_optionsTag, 0, opts)
+			p.sort(elements, sourceInfo, path)
+			path = append(path, internal.Method_optionsTag)
+
+			for i, addr := range elements.addrs {
+				if i > 0 {
+					p.newLine(w)
+				}
+				o := elements.at(addr).([]option)
+				p.printOptionsLong(o, w, sourceInfo, path, indent)
+			}
+
+			p.indent(w, indent-1)
+			fmt.Fprintln(w, "}")
+		} else {
+			fmt.Fprint(w, ";")
+		}
+	})
+}
+
+func (p *Printer) printOptionsLong(opts []option, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	p.printOptions(opts, w, indent,
+		func(i int32) *descriptor.SourceCodeInfo_Location {
+			return sourceInfo.Get(append(path, i))
+		},
+		func(w *writer, indent int, opt option) {
+			p.indent(w, indent)
+			fmt.Fprint(w, "option ")
+			p.printOption(opt.name, opt.val, w, indent)
+			fmt.Fprint(w, ";")
+		})
+}
+
+func (p *Printer) printOptionsShort(dsc interface{}, optsMsg proto.Message, mf *dynamic.MessageFactory, optsTag int32, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	d, ok := dsc.(desc.Descriptor)
+	if !ok {
+		d = dsc.(extensionRange).owner
+	}
+	opts, err := p.extractOptions(d, optsMsg, mf)
+	if err != nil {
+		if w.err == nil {
+			w.err = err
+		}
+		return
+	}
+
+	elements := elementAddrs{dsc: dsc, opts: opts}
+	elements.addrs = optionsAsElementAddrs(optsTag, 0, opts)
+	p.sort(elements, sourceInfo, path)
+	p.printOptionElementsShort(elements, w, sourceInfo, path, indent)
+}
+
+func (p *Printer) printOptionElementsShort(addrs elementAddrs, w *writer, sourceInfo internal.SourceInfoMap, path []int32, indent int) {
+	if len(addrs.addrs) == 0 {
+		return
+	}
+	first := true
+	fmt.Fprint(w, "[")
+	for _, addr := range addrs.addrs {
+		opts := addrs.at(addr).([]option)
+		var childPath []int32
+		if addr.elementIndex < 0 {
+			// pseudo-option
+			childPath = append(path, int32(-addr.elementIndex))
+		} else {
+			childPath = append(path, addr.elementType, int32(addr.elementIndex))
+		}
+		p.printOptions(opts, w, inline(indent),
+			func(i int32) *descriptor.SourceCodeInfo_Location {
+				p := childPath
+				if addr.elementIndex >= 0 {
+					p = append(p, i)
+				}
+				return sourceInfo.Get(p)
+			},
+			func(w *writer, indent int, opt option) {
+				if first {
+					first = false
+				} else {
+					fmt.Fprint(w, ", ")
+				}
+				p.printOption(opt.name, opt.val, w, indent)
+				fmt.Fprint(w, " ") // trailing space
+			})
+	}
+	fmt.Fprint(w, "]")
+}
+
+func (p *Printer) printOptions(opts []option, w *writer, indent int, siFetch func(i int32) *descriptor.SourceCodeInfo_Location, fn func(w *writer, indent int, opt option)) {
+	for i, opt := range opts {
+		si := siFetch(int32(i))
+		p.printElement(false, si, w, indent, func(w *writer) {
+			fn(w, indent, opt)
+		})
+	}
+}
+
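+// inline converts an indentation level into its negative, "inlined" form,
+// which the printing helpers interpret as a request to print the element on
+// the current line instead of on its own indented line.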
+func inline(indent int) int {
+	if indent < 0 {
+		// already inlined
+		return indent
+	}
+	// negative indent means inline; add 2 so that, if the value wraps onto
+	// another line, it is indented two stops further than its parent
+	return -indent - 2
+}
+
+func sortKeys(m map[interface{}]interface{}) []interface{} {
+	res := make(sortedKeys, len(m))
+	i := 0
+	for k := range m {
+		res[i] = k
+		i++
+	}
+	sort.Sort(res)
+	return ([]interface{})(res)
+}
+
+type sortedKeys []interface{}
+
+func (k sortedKeys) Len() int {
+	return len(k)
+}
+
+func (k sortedKeys) Swap(i, j int) {
+	k[i], k[j] = k[j], k[i]
+}
+
+func (k sortedKeys) Less(i, j int) bool {
+	switch i := k[i].(type) {
+	case int32:
+		return i < k[j].(int32)
+	case uint32:
+		return i < k[j].(uint32)
+	case int64:
+		return i < k[j].(int64)
+	case uint64:
+		return i < k[j].(uint64)
+	case string:
+		return i < k[j].(string)
+	case bool:
+		return !i && k[j].(bool)
+	default:
+		panic(fmt.Sprintf("invalid type for map key: %T", i))
+	}
+}
+
+func (p *Printer) printOption(name string, optVal interface{}, w *writer, indent int) {
+	fmt.Fprintf(w, "%s = ", name)
+
+	switch optVal := optVal.(type) {
+	case int32, uint32, int64, uint64:
+		fmt.Fprintf(w, "%d", optVal)
+	case float32, float64:
+		fmt.Fprintf(w, "%f", optVal)
+	case string:
+		fmt.Fprintf(w, "%s", quotedString(optVal))
+	case []byte:
+		fmt.Fprintf(w, "%s", quotedString(string(optVal)))
+	case bool:
+		fmt.Fprintf(w, "%v", optVal)
+	case ident:
+		fmt.Fprintf(w, "%s", optVal)
+	case *desc.EnumValueDescriptor:
+		fmt.Fprintf(w, "%s", optVal.GetName())
+	case proto.Message:
+		// TODO: if value is too long, marshal to text format with indentation to
+		// make output prettier (also requires correctly indenting subsequent lines)
+
+		// TODO: alternate approach so we can apply p.ForceFullyQualifiedNames
+		// inside the resulting value?
+
+		fmt.Fprintf(w, "{ %s }", proto.CompactTextString(optVal))
+	default:
+		panic(fmt.Sprintf("unknown type of value %T for field %s", optVal, name))
+	}
+}
+
+type edgeKind int
+
+const (
+	edgeKindOption edgeKind = iota
+	edgeKindFile
+	edgeKindMessage
+	edgeKindField
+	edgeKindOneOf
+	edgeKindExtensionRange
+	edgeKindEnum
+	edgeKindEnumVal
+	edgeKindService
+	edgeKindMethod
+)
+
+// edges in simple state machine for matching options paths
+// whose prefix should be included in source info to handle
+// the way options are printed (which cannot always include
+// the full path from original source)
+var edges = map[edgeKind]map[int32]edgeKind{
+	edgeKindFile: {
+		internal.File_optionsTag:    edgeKindOption,
+		internal.File_messagesTag:   edgeKindMessage,
+		internal.File_enumsTag:      edgeKindEnum,
+		internal.File_extensionsTag: edgeKindField,
+		internal.File_servicesTag:   edgeKindService,
+	},
+	edgeKindMessage: {
+		internal.Message_optionsTag:        edgeKindOption,
+		internal.Message_fieldsTag:         edgeKindField,
+		internal.Message_oneOfsTag:         edgeKindOneOf,
+		internal.Message_nestedMessagesTag: edgeKindMessage,
+		internal.Message_enumsTag:          edgeKindEnum,
+		internal.Message_extensionsTag:     edgeKindField,
+		internal.Message_extensionRangeTag: edgeKindExtensionRange,
+		// TODO: reserved range tag
+	},
+	edgeKindField: {
+		internal.Field_optionsTag: edgeKindOption,
+	},
+	edgeKindOneOf: {
+		internal.OneOf_optionsTag: edgeKindOption,
+	},
+	edgeKindExtensionRange: {
+		internal.ExtensionRange_optionsTag: edgeKindOption,
+	},
+	edgeKindEnum: {
+		internal.Enum_optionsTag: edgeKindOption,
+		internal.Enum_valuesTag:  edgeKindEnumVal,
+	},
+	edgeKindEnumVal: {
+		internal.EnumVal_optionsTag: edgeKindOption,
+	},
+	edgeKindService: {
+		internal.Service_optionsTag: edgeKindOption,
+		internal.Service_methodsTag: edgeKindMethod,
+	},
+	edgeKindMethod: {
+		internal.Method_optionsTag: edgeKindOption,
+	},
+}
+
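+// extendOptionLocations walks the given source info and, for every location
+// whose path reaches an options field (per the edges table above, e.g. a path
+// descending File_messagesTag -> Message_fieldsTag -> Field_optionsTag),
+// registers extra entries under the shortened paths that the printer uses
+// when querying and sorting option source info.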
+func extendOptionLocations(sc internal.SourceInfoMap) {
+	for _, loc := range sc {
+		allowed := edges[edgeKindFile]
+		for i := 0; i+1 < len(loc.Path); i += 2 {
+			nextKind, ok := allowed[loc.Path[i]]
+			if !ok {
+				break
+			}
+			if nextKind == edgeKindOption {
+				// We've found an option entry. This could be arbitrarily
+				// deep (for option values that are messages) or it could end
+				// abruptly (for non-repeated fields). But we need a path
+				// that is exactly the path-so-far plus two: the option tag
+				// and an optional index for repeated option fields (zero
+				// for non-repeated option fields). This is used for
+				// querying source info when printing options and for sorting
+				// elements.
+				newPath := make([]int32, i+3)
+				copy(newPath, loc.Path)
+				sc.PutIfAbsent(newPath, loc)
+				// we also record the path-so-far plus two, but with an
+				// explicit zero index -- just in case this actual path has an
+				// extra path element that is not an index (e.g. the option
+				// field is not repeated, but the source info we are looking
+				// at indicates a tag of a nested field)
+				newPath[len(newPath)-1] = 0
+				sc.PutIfAbsent(newPath, loc)
+				// finally, we need the path-so-far plus one, just the option
+				// tag, for sorting option groups
+				newPath = newPath[:len(newPath)-1]
+				sc.PutIfAbsent(newPath, loc)
+
+				break
+			} else {
+				allowed = edges[nextKind]
+			}
+		}
+	}
+}
+
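+// extractOptions converts the given options message into a map from option
+// field number to printable option values: enum numbers are resolved to
+// their value descriptors, map and repeated values are expanded into one
+// entry per element, and uninterpreted options are collected under
+// internal.UninterpretedOptionsTag.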
+func (p *Printer) extractOptions(dsc desc.Descriptor, opts proto.Message, mf *dynamic.MessageFactory) (map[int32][]option, error) {
+	md, err := desc.LoadMessageDescriptorForMessage(opts)
+	if err != nil {
+		return nil, err
+	}
+	dm := mf.NewDynamicMessage(md)
+	if err = dm.ConvertFrom(opts); err != nil {
+		return nil, fmt.Errorf("failed to convert %s to dynamic message: %v", md.GetFullyQualifiedName(), err)
+	}
+
+	pkg := dsc.GetFile().GetPackage()
+	var scope string
+	if _, ok := dsc.(*desc.FileDescriptor); ok {
+		scope = pkg
+	} else {
+		scope = dsc.GetFullyQualifiedName()
+	}
+
+	options := map[int32][]option{}
+	var uninterpreted []interface{}
+	for _, fldset := range [][]*desc.FieldDescriptor{md.GetFields(), mf.GetExtensionRegistry().AllExtensionsForType(md.GetFullyQualifiedName())} {
+		for _, fld := range fldset {
+			if dm.HasField(fld) {
+				val := dm.GetField(fld)
+				var opts []option
+				var name string
+				if fld.IsExtension() {
+					name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+				} else {
+					name = fld.GetName()
+				}
+				switch val := val.(type) {
+				case []interface{}:
+					if fld.GetNumber() == internal.UninterpretedOptionsTag {
+						// we handle uninterpreted options differently
+						uninterpreted = val
+						continue
+					}
+
+					for _, e := range val {
+						if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+							ev := fld.GetEnumType().FindValueByNumber(e.(int32))
+							if ev == nil {
+								// have to skip unknown enum values :(
+								continue
+							}
+							e = ev
+						}
+						var name string
+						if fld.IsExtension() {
+							name = fmt.Sprintf("(%s)", p.qualifyName(pkg, scope, fld.GetFullyQualifiedName()))
+						} else {
+							name = fld.GetName()
+						}
+						opts = append(opts, option{name: name, val: e})
+					}
+				case map[interface{}]interface{}:
+					for k := range sortKeys(val) {
+						v := val[k]
+						vf := fld.GetMapValueType()
+						if vf.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+							ev := vf.GetEnumType().FindValueByNumber(v.(int32))
+							if ev == nil {
+								// have to skip unknown enum values :(
+								continue
+							}
+							v = ev
+						}
+						entry := mf.NewDynamicMessage(fld.GetMessageType())
+						entry.SetFieldByNumber(1, k)
+						entry.SetFieldByNumber(2, v)
+						opts = append(opts, option{name: name, val: entry})
+					}
+				default:
+					if fld.GetType() == descriptor.FieldDescriptorProto_TYPE_ENUM {
+						ev := fld.GetEnumType().FindValueByNumber(val.(int32))
+						if ev == nil {
+							// have to skip unknown enum values :(
+							continue
+						}
+						val = ev
+					}
+					opts = append(opts, option{name: name, val: val})
+				}
+				if len(opts) > 0 {
+					options[fld.GetNumber()] = opts
+				}
+			}
+		}
+	}
+
+	// if there are uninterpreted options, add those too
+	if len(uninterpreted) > 0 {
+		opts := make([]option, len(uninterpreted))
+		for i, u := range uninterpreted {
+			var unint *descriptor.UninterpretedOption
+			if un, ok := u.(*descriptor.UninterpretedOption); ok {
+				unint = un
+			} else {
+				dm := u.(*dynamic.Message)
+				unint = &descriptor.UninterpretedOption{}
+				if err := dm.ConvertTo(unint); err != nil {
+					return nil, err
+				}
+			}
+
+			var buf bytes.Buffer
+			for ni, n := range unint.Name {
+				if ni > 0 {
+					buf.WriteByte('.')
+				}
+				if n.GetIsExtension() {
+					fmt.Fprintf(&buf, "(%s)", n.GetNamePart())
+				} else {
+					buf.WriteString(n.GetNamePart())
+				}
+			}
+
+			var v interface{}
+			switch {
+			case unint.IdentifierValue != nil:
+				v = ident(unint.GetIdentifierValue())
+			case unint.StringValue != nil:
+				v = string(unint.GetStringValue())
+			case unint.DoubleValue != nil:
+				v = unint.GetDoubleValue()
+			case unint.PositiveIntValue != nil:
+				v = unint.GetPositiveIntValue()
+			case unint.NegativeIntValue != nil:
+				v = unint.GetNegativeIntValue()
+			case unint.AggregateValue != nil:
+				v = ident(unint.GetAggregateValue())
+			}
+
+			opts[i] = option{name: buf.String(), val: v}
+		}
+		options[internal.UninterpretedOptionsTag] = opts
+	}
+
+	return options, nil
+}
+
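+// optionsAsElementAddrs builds one element address per option tag in opts,
+// using optionsTag as the element type and the given explicit order, sorted
+// so that standard options precede custom (extension) options.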
+func optionsAsElementAddrs(optionsTag int32, order int, opts map[int32][]option) []elementAddr {
+	var optAddrs []elementAddr
+	for tag := range opts {
+		optAddrs = append(optAddrs, elementAddr{elementType: optionsTag, elementIndex: int(tag), order: order})
+	}
+	sort.Sort(optionsByName{addrs: optAddrs, opts: opts})
+	return optAddrs
+}
+
+// quotedString implements the text format for string literals for protocol
+// buffers. This form is also accepted by the protocol buffer compiler,
+// protoc, for string literals in option values.
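+//
+// For example, an input containing a newline, a double quote, and a byte
+// outside the printable ASCII range is rendered with the escapes \n, \",
+// and a three-digit octal escape such as \001, respectively.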
+func quotedString(s string) string {
+	var b bytes.Buffer
+	// opening quote
+	b.WriteByte('"')
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			b.WriteString("\\n")
+		case '\r':
+			b.WriteString("\\r")
+		case '\t':
+			b.WriteString("\\t")
+		case '"':
+			b.WriteString("\\\"")
+		case '\\':
+			b.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c < 0x7f {
+				b.WriteByte(c)
+			} else {
+				fmt.Fprintf(&b, "\\%03o", c)
+			}
+		}
+	}
+	b.WriteByte('"')
+
+	return b.String()
+}
+
+type elementAddr struct {
+	elementType  int32
+	elementIndex int
+	order        int
+}
+
+type elementAddrs struct {
+	addrs []elementAddr
+	dsc   interface{}
+	opts  map[int32][]option
+}
+
+func (a elementAddrs) Len() int {
+	return len(a.addrs)
+}
+
+func (a elementAddrs) Less(i, j int) bool {
+	// explicit order is considered first
+	if a.addrs[i].order < a.addrs[j].order {
+		return true
+	} else if a.addrs[i].order > a.addrs[j].order {
+		return false
+	}
+	// if order is equal, sort by element type
+	if a.addrs[i].elementType < a.addrs[j].elementType {
+		return true
+	} else if a.addrs[i].elementType > a.addrs[j].elementType {
+		return false
+	}
+
+	di := a.at(a.addrs[i])
+	dj := a.at(a.addrs[j])
+
+	switch vi := di.(type) {
+	case *desc.FieldDescriptor:
+		// fields are ordered by tag number
+		vj := dj.(*desc.FieldDescriptor)
+		// regular fields before extensions; extensions grouped by extendee
+		if !vi.IsExtension() && vj.IsExtension() {
+			return true
+		} else if vi.IsExtension() && !vj.IsExtension() {
+			return false
+		} else if vi.IsExtension() && vj.IsExtension() {
+			if vi.GetOwner() != vj.GetOwner() {
+				return vi.GetOwner().GetFullyQualifiedName() < vj.GetOwner().GetFullyQualifiedName()
+			}
+		}
+		return vi.GetNumber() < vj.GetNumber()
+
+	case *desc.EnumValueDescriptor:
+		// enum values ordered by number then name
+		vj := dj.(*desc.EnumValueDescriptor)
+		if vi.GetNumber() == vj.GetNumber() {
+			return vi.GetName() < vj.GetName()
+		}
+		return vi.GetNumber() < vj.GetNumber()
+
+	case *descriptor.DescriptorProto_ExtensionRange:
+		// extension ranges ordered by tag
+		return vi.GetStart() < dj.(*descriptor.DescriptorProto_ExtensionRange).GetStart()
+
+	case reservedRange:
+		// reserved ranges ordered by tag, too
+		return vi.start < dj.(reservedRange).start
+
+	case string:
+		// reserved names lexically sorted
+		return vi < dj.(string)
+
+	case pkg:
+		// package name lexically sorted
+		return vi < dj.(pkg)
+
+	case imp:
+		// import paths lexically sorted
+		return vi < dj.(imp)
+
+	case []option:
+		// options sorted by name, extensions last
+		return optionLess(vi, dj.([]option))
+
+	default:
+		// all other descriptors ordered by name
+		return di.(desc.Descriptor).GetName() < dj.(desc.Descriptor).GetName()
+	}
+}
+
+func (a elementAddrs) Swap(i, j int) {
+	a.addrs[i], a.addrs[j] = a.addrs[j], a.addrs[i]
+}
+
+func (a elementAddrs) at(addr elementAddr) interface{} {
+	switch dsc := a.dsc.(type) {
+	case *desc.FileDescriptor:
+		switch addr.elementType {
+		case internal.File_packageTag:
+			return pkg(dsc.GetPackage())
+		case internal.File_dependencyTag:
+			return imp(dsc.AsFileDescriptorProto().GetDependency()[addr.elementIndex])
+		case internal.File_optionsTag:
+			return a.opts[int32(addr.elementIndex)]
+		case internal.File_messagesTag:
+			return dsc.GetMessageTypes()[addr.elementIndex]
+		case internal.File_enumsTag:
+			return dsc.GetEnumTypes()[addr.elementIndex]
+		case internal.File_servicesTag:
+			return dsc.GetServices()[addr.elementIndex]
+		case internal.File_extensionsTag:
+			return dsc.GetExtensions()[addr.elementIndex]
+		}
+	case *desc.MessageDescriptor:
+		switch addr.elementType {
+		case internal.Message_optionsTag:
+			return a.opts[int32(addr.elementIndex)]
+		case internal.Message_fieldsTag:
+			return dsc.GetFields()[addr.elementIndex]
+		case internal.Message_nestedMessagesTag:
+			return dsc.GetNestedMessageTypes()[addr.elementIndex]
+		case internal.Message_enumsTag:
+			return dsc.GetNestedEnumTypes()[addr.elementIndex]
+		case internal.Message_extensionsTag:
+			return dsc.GetNestedExtensions()[addr.elementIndex]
+		case internal.Message_extensionRangeTag:
+			return dsc.AsDescriptorProto().GetExtensionRange()[addr.elementIndex]
+		case internal.Message_reservedRangeTag:
+			rng := dsc.AsDescriptorProto().GetReservedRange()[addr.elementIndex]
+			return reservedRange{start: rng.GetStart(), end: rng.GetEnd() - 1}
+		case internal.Message_reservedNameTag:
+			return dsc.AsDescriptorProto().GetReservedName()[addr.elementIndex]
+		}
+	case *desc.FieldDescriptor:
+		if addr.elementType == internal.Field_optionsTag {
+			return a.opts[int32(addr.elementIndex)]
+		}
+	case *desc.OneOfDescriptor:
+		switch addr.elementType {
+		case internal.OneOf_optionsTag:
+			return a.opts[int32(addr.elementIndex)]
+		case -internal.Message_fieldsTag:
+			return dsc.GetOwner().GetFields()[addr.elementIndex]
+		}
+	case *desc.EnumDescriptor:
+		switch addr.elementType {
+		case internal.Enum_optionsTag:
+			return a.opts[int32(addr.elementIndex)]
+		case internal.Enum_valuesTag:
+			return dsc.GetValues()[addr.elementIndex]
+		case internal.Enum_reservedRangeTag:
+			rng := dsc.AsEnumDescriptorProto().GetReservedRange()[addr.elementIndex]
+			return reservedRange{start: rng.GetStart(), end: rng.GetEnd()}
+		case internal.Enum_reservedNameTag:
+			return dsc.AsEnumDescriptorProto().GetReservedName()[addr.elementIndex]
+		}
+	case *desc.EnumValueDescriptor:
+		if addr.elementType == internal.EnumVal_optionsTag {
+			return a.opts[int32(addr.elementIndex)]
+		}
+	case *desc.ServiceDescriptor:
+		switch addr.elementType {
+		case internal.Service_optionsTag:
+			return a.opts[int32(addr.elementIndex)]
+		case internal.Service_methodsTag:
+			return dsc.GetMethods()[addr.elementIndex]
+		}
+	case *desc.MethodDescriptor:
+		if addr.elementType == internal.Method_optionsTag {
+			return a.opts[int32(addr.elementIndex)]
+		}
+	case extensionRange:
+		if addr.elementType == internal.ExtensionRange_optionsTag {
+			return a.opts[int32(addr.elementIndex)]
+		}
+	}
+
+	panic(fmt.Sprintf("location for unknown field %d of %T", addr.elementType, a.dsc))
+}
+
+type extensionRange struct {
+	owner    *desc.MessageDescriptor
+	extRange *descriptor.DescriptorProto_ExtensionRange
+}
+
+type elementSrcOrder struct {
+	elementAddrs
+	sourceInfo internal.SourceInfoMap
+	prefix     []int32
+}
+
+func (a elementSrcOrder) Less(i, j int) bool {
+	ti := a.addrs[i].elementType
+	ei := a.addrs[i].elementIndex
+
+	tj := a.addrs[j].elementType
+	ej := a.addrs[j].elementIndex
+
+	var si, sj *descriptor.SourceCodeInfo_Location
+	if ei < 0 {
+		si = a.sourceInfo.Get(append(a.prefix, -int32(ei)))
+	} else if ti < 0 {
+		p := make([]int32, len(a.prefix)-2)
+		copy(p, a.prefix)
+		si = a.sourceInfo.Get(append(p, ti, int32(ei)))
+	} else {
+		si = a.sourceInfo.Get(append(a.prefix, ti, int32(ei)))
+	}
+	if ej < 0 {
+		sj = a.sourceInfo.Get(append(a.prefix, -int32(ej)))
+	} else if tj < 0 {
+		p := make([]int32, len(a.prefix)-2)
+		copy(p, a.prefix)
+		sj = a.sourceInfo.Get(append(p, tj, int32(ej)))
+	} else {
+		sj = a.sourceInfo.Get(append(a.prefix, tj, int32(ej)))
+	}
+
+	if (si == nil) != (sj == nil) {
+		// generally, we put unknown elements after known ones;
+		// except package and option elements go first
+
+		// i will be unknown and j will be known
+		swapped := false
+		if si != nil {
+			si, sj = sj, si
+			// no need to swap ti and tj because we don't use tj anywhere below
+			ti = tj
+			swapped = true
+		}
+		switch a.dsc.(type) {
+		case *desc.FileDescriptor:
+			if ti == internal.File_packageTag || ti == internal.File_optionsTag {
+				return !swapped
+			}
+		case *desc.MessageDescriptor:
+			if ti == internal.Message_optionsTag {
+				return !swapped
+			}
+		case *desc.EnumDescriptor:
+			if ti == internal.Enum_optionsTag {
+				return !swapped
+			}
+		case *desc.ServiceDescriptor:
+			if ti == internal.Service_optionsTag {
+				return !swapped
+			}
+		}
+		return swapped
+
+	} else if si == nil || sj == nil {
+		// let stable sort keep unknown elements in same relative order
+		return false
+	}
+
+	for idx := 0; idx < len(sj.Span); idx++ {
+		if idx >= len(si.Span) {
+			return true
+		}
+		if si.Span[idx] < sj.Span[idx] {
+			return true
+		}
+		if si.Span[idx] > sj.Span[idx] {
+			return false
+		}
+	}
+	return false
+}
+
+type optionsByName struct {
+	addrs []elementAddr
+	opts  map[int32][]option
+}
+
+func (o optionsByName) Len() int {
+	return len(o.addrs)
+}
+
+func (o optionsByName) Less(i, j int) bool {
+	oi := o.opts[int32(o.addrs[i].elementIndex)]
+	oj := o.opts[int32(o.addrs[j].elementIndex)]
+	return optionLess(oi, oj)
+}
+
+func optionLess(i, j []option) bool {
+	ni := i[0].name
+	nj := j[0].name
+	if ni[0] != '(' && nj[0] == '(' {
+		return true
+	} else if ni[0] == '(' && nj[0] != '(' {
+		return false
+	}
+	return ni < nj
+}
+
+func (o optionsByName) Swap(i, j int) {
+	o.addrs[i], o.addrs[j] = o.addrs[j], o.addrs[i]
+}
+
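+// printElement prints the leading comments attached to si (when the relevant
+// comment types are enabled), invokes el to print the element itself, prints
+// the trailing comments, and finally emits a newline if the element is not
+// being inlined and did not already end with one.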
+func (p *Printer) printElement(isDescriptor bool, si *descriptor.SourceCodeInfo_Location, w *writer, indent int, el func(*writer)) {
+	includeComments := isDescriptor || p.includeCommentType(CommentsTokens)
+
+	if includeComments && si != nil {
+		p.printLeadingComments(si, w, indent)
+	}
+	el(w)
+	if includeComments && si != nil {
+		p.printTrailingComments(si, w, indent)
+	}
+	if indent >= 0 && !w.newline {
+		// if we're not printing inline but element did not have trailing newline, add one now
+		fmt.Fprintln(w)
+	}
+}
+
+func (p *Printer) printElementString(si *descriptor.SourceCodeInfo_Location, w *writer, indent int, str string) {
+	p.printElement(false, si, w, inline(indent), func(w *writer) {
+		fmt.Fprintf(w, "%s ", str)
+	})
+}
+
+func (p *Printer) includeCommentType(c CommentType) bool {
+	return (p.OmitComments & c) == 0
+}
+
+func (p *Printer) printLeadingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) bool {
+	endsInNewLine := false
+
+	if p.includeCommentType(CommentsDetached) {
+		for _, c := range si.GetLeadingDetachedComments() {
+			if p.printComment(c, w, indent, true) {
+				// if comment ended in newline, add another newline to separate
+				// this comment from the next
+				p.newLine(w)
+				endsInNewLine = true
+			} else if indent < 0 {
+				// comment did not end in newline and we are trying to inline?
+				// just add a space to separate this comment from what follows
+				fmt.Fprint(w, " ")
+				endsInNewLine = false
+			} else {
+				// comment did not end in newline and we are *not* trying to inline?
+				// add newline to end of comment and add another to separate this
+				// comment from what follows
+				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
+				p.newLine(w)
+				endsInNewLine = true
+			}
+		}
+	}
+
+	if p.includeCommentType(CommentsLeading) && si.GetLeadingComments() != "" {
+		endsInNewLine = p.printComment(si.GetLeadingComments(), w, indent, true)
+		if !endsInNewLine {
+			if indent >= 0 {
+				// leading comment didn't end with newline but needs one
+				// (because we're *not* inlining)
+				fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
+				endsInNewLine = true
+			} else {
+				// space between comment and following element when inlined
+				fmt.Fprint(w, " ")
+			}
+		}
+	}
+
+	return endsInNewLine
+}
+
+func (p *Printer) printTrailingComments(si *descriptor.SourceCodeInfo_Location, w *writer, indent int) {
+	if p.includeCommentType(CommentsTrailing) && si.GetTrailingComments() != "" {
+		if !p.printComment(si.GetTrailingComments(), w, indent, p.TrailingCommentsOnSeparateLine) && indent >= 0 {
+			// trailing comment didn't end with newline but needs one
+			// (because we're *not* inlining)
+			fmt.Fprintln(w) // needed to end comment, regardless of p.Compact
+		} else if indent < 0 {
+			fmt.Fprint(w, " ")
+		}
+	}
+}
+
+func (p *Printer) printComment(comments string, w *writer, indent int, forceNextLine bool) bool {
+	if comments == "" {
+		return false
+	}
+
+	var multiLine bool
+	if indent < 0 {
+		// use multi-line style when inlining
+		multiLine = true
+	} else {
+		multiLine = p.PreferMultiLineStyleComments
+	}
+	if multiLine && strings.Contains(comments, "*/") {
+		// can't emit '*/' in a multi-line style comment
+		multiLine = false
+	}
+
+	lines := strings.Split(comments, "\n")
+
+	// first, remove leading and trailing blank lines
+	if lines[0] == "" {
+		lines = lines[1:]
+	}
+	if lines[len(lines)-1] == "" {
+		lines = lines[:len(lines)-1]
+	}
+	if len(lines) == 0 {
+		return false
+	}
+
+	if indent >= 0 && !w.newline {
+		// last element did not have trailing newline, so we
+		// either need to tack on newline or, if comment is
+		// just one line, inline it on the end
+		if forceNextLine || len(lines) > 1 {
+			fmt.Fprintln(w)
+		} else {
+			if !w.space {
+				fmt.Fprint(w, " ")
+			}
+			indent = inline(indent)
+		}
+	}
+
+	if len(lines) == 1 && multiLine {
+		p.indent(w, indent)
+		line := lines[0]
+		if line[0] == ' ' && line[len(line)-1] != ' ' {
+			// add trailing space for symmetry
+			line += " "
+		}
+		fmt.Fprintf(w, "/*%s*/", line)
+		if indent >= 0 {
+			fmt.Fprintln(w)
+			return true
+		}
+		return false
+	}
+
+	if multiLine {
+		// multi-line style comments that actually span multiple lines
+		// get a blank line before and after so that comment renders nicely
+		lines = append(lines, "", "")
+		copy(lines[1:], lines)
+		lines[0] = ""
+	}
+
+	for i, l := range lines {
+		p.maybeIndent(w, indent, i > 0)
+		if multiLine {
+			if i == 0 {
+				// first line
+				fmt.Fprintf(w, "/*%s\n", strings.TrimRight(l, " \t"))
+			} else if i == len(lines)-1 {
+				// last line
+				if l == "" {
+					fmt.Fprint(w, " */")
+				} else {
+					fmt.Fprintf(w, " *%s*/", l)
+				}
+				if indent >= 0 {
+					fmt.Fprintln(w)
+				}
+			} else {
+				fmt.Fprintf(w, " *%s\n", strings.TrimRight(l, " \t"))
+			}
+		} else {
+			fmt.Fprintf(w, "//%s\n", strings.TrimRight(l, " \t"))
+		}
+	}
+
+	// single-line comments always end in newline; multi-line comments only
+	// end in newline for non-negative (i.e. non-inlined) indentation
+	return !multiLine || indent >= 0
+}
+
+func (p *Printer) indent(w io.Writer, indent int) {
+	for i := 0; i < indent; i++ {
+		fmt.Fprint(w, p.Indent)
+	}
+}
+
+func (p *Printer) maybeIndent(w io.Writer, indent int, requireIndent bool) {
+	if indent < 0 && requireIndent {
+		p.indent(w, -indent)
+	} else {
+		p.indent(w, indent)
+	}
+}
+
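+// writer wraps an io.Writer with the small amount of state the printer
+// needs: it defers a single trailing space (dropping it before ';', ',',
+// and ']'), tracks whether the last byte written was a newline, and records
+// the first write error in err so callers can check it after printing.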
+type writer struct {
+	io.Writer
+	err     error
+	space   bool
+	newline bool
+}
+
+func newWriter(w io.Writer) *writer {
+	return &writer{Writer: w, newline: true}
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	if len(p) == 0 {
+		return 0, nil
+	}
+
+	w.newline = false
+
+	if w.space {
+		// skip any trailing space if the following
+		// character is semicolon, comma, or close bracket
+		if p[0] != ';' && p[0] != ',' && p[0] != ']' {
+			_, err := w.Writer.Write([]byte{' '})
+			if err != nil {
+				w.err = err
+				return 0, err
+			}
+		}
+		w.space = false
+	}
+
+	if p[len(p)-1] == ' ' {
+		w.space = true
+		p = p[:len(p)-1]
+	}
+	if len(p) > 0 && p[len(p)-1] == '\n' {
+		w.newline = true
+	}
+
+	num, err := w.Writer.Write(p)
+	if err != nil {
+		w.err = err
+	} else if w.space {
+		// pretend space was written
+		num++
+	}
+	return num, err
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/binary.go b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
new file mode 100644
index 0000000..b1fbe7c
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/binary.go
@@ -0,0 +1,714 @@
+package dynamic
+
+// Binary serialization and de-serialization for dynamic messages
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// defaultDeterminism, if true, will mean that calls to Marshal will produce
+// deterministic output. This is used to make the output of proto.Marshal(...)
+// deterministic (since that API provides no way to convey determinism intent).
+// **This is only used from tests.**
+var defaultDeterminism = false
+
+// Marshal serializes this message to bytes, returning an error if the operation
+// fails. The resulting bytes are in the standard protocol buffer binary format.
+func (m *Message) Marshal() ([]byte, error) {
+	var b codedBuffer
+	if err := m.marshal(&b, defaultDeterminism); err != nil {
+		return nil, err
+	}
+	return b.buf, nil
+}
+
+// MarshalAppend behaves exactly the same as Marshal, except instead of allocating a
+// new byte slice to marshal into, it uses the provided byte slice. The backing array
+// for the returned byte slice *may* be the same as the one that was passed in, but
+// it's not guaranteed as a new backing array will automatically be allocated if
+// more bytes need to be written than the provided buffer has capacity for.
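+//
+// For example, a caller might reuse a scratch buffer across messages:
+//
+//	scratch = scratch[:0]
+//	scratch, err = m.MarshalAppend(scratch)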
+func (m *Message) MarshalAppend(b []byte) ([]byte, error) {
+	codedBuf := codedBuffer{buf: b}
+	if err := m.marshal(&codedBuf, defaultDeterminism); err != nil {
+		return nil, err
+	}
+	return codedBuf.buf, nil
+}
+
+// MarshalDeterministic serializes this message to bytes in a deterministic way,
+// returning an error if the operation fails. This differs from Marshal in that
+// map keys will be sorted before serializing to bytes. The protobuf spec does
+// not define ordering for map entries, so Marshal will use standard Go map
+// iteration order (which will be random). But for cases where determinism is
+// more important than performance, use this method instead.
+func (m *Message) MarshalDeterministic() ([]byte, error) {
+	var b codedBuffer
+	if err := m.marshal(&b, true); err != nil {
+		return nil, err
+	}
+	return b.buf, nil
+}
+
+func (m *Message) marshal(b *codedBuffer, deterministic bool) error {
+	if err := m.marshalKnownFields(b, deterministic); err != nil {
+		return err
+	}
+	return m.marshalUnknownFields(b)
+}
+
+func (m *Message) marshalKnownFields(b *codedBuffer, deterministic bool) error {
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		val := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd == nil {
+			panic(fmt.Sprintf("Couldn't find field for tag %d", itag))
+		}
+		if err := marshalField(itag, fd, val, b, deterministic); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (m *Message) marshalUnknownFields(b *codedBuffer) error {
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		sl := m.unknownFields[itag]
+		for _, u := range sl {
+			if err := b.encodeTagAndWireType(itag, u.Encoding); err != nil {
+				return err
+			}
+			switch u.Encoding {
+			case proto.WireBytes:
+				if err := b.encodeRawBytes(u.Contents); err != nil {
+					return err
+				}
+			case proto.WireStartGroup:
+				b.buf = append(b.buf, u.Contents...)
+				if err := b.encodeTagAndWireType(itag, proto.WireEndGroup); err != nil {
+					return err
+				}
+			case proto.WireFixed32:
+				if err := b.encodeFixed32(u.Value); err != nil {
+					return err
+				}
+			case proto.WireFixed64:
+				if err := b.encodeFixed64(u.Value); err != nil {
+					return err
+				}
+			case proto.WireVarint:
+				if err := b.encodeVarint(u.Value); err != nil {
+					return err
+				}
+			default:
+				return proto.ErrInternalBadWireType
+			}
+		}
+	}
+	return nil
+}
+
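+// marshalField writes a single field to the buffer: map fields are encoded
+// as a sequence of entry messages (with keys sorted when deterministic is
+// true), repeated scalar fields that qualify are packed, and all other
+// values are delegated to marshalFieldElement.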
+func marshalField(tag int32, fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
+	if fd.IsMap() {
+		mp := val.(map[interface{}]interface{})
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valType := entryType.FindFieldByNumber(2)
+		var entryBuffer codedBuffer
+		if deterministic {
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, k := range keys {
+				v := mp[k]
+				entryBuffer.reset()
+				if err := marshalFieldElement(1, keyType, k, &entryBuffer, deterministic); err != nil {
+					return err
+				}
+				if err := marshalFieldElement(2, valType, v, &entryBuffer, deterministic); err != nil {
+					return err
+				}
+				if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+					return err
+				}
+				if err := b.encodeRawBytes(entryBuffer.buf); err != nil {
+					return err
+				}
+			}
+		} else {
+			for k, v := range mp {
+				entryBuffer.reset()
+				if err := marshalFieldElement(1, keyType, k, &entryBuffer, deterministic); err != nil {
+					return err
+				}
+				if err := marshalFieldElement(2, valType, v, &entryBuffer, deterministic); err != nil {
+					return err
+				}
+				if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+					return err
+				}
+				if err := b.encodeRawBytes(entryBuffer.buf); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	} else if fd.IsRepeated() {
+		sl := val.([]interface{})
+		wt, err := getWireType(fd.GetType())
+		if err != nil {
+			return err
+		}
+		if isPacked(fd) && len(sl) > 1 &&
+			(wt == proto.WireVarint || wt == proto.WireFixed32 || wt == proto.WireFixed64) {
+			// packed repeated field
+			var packedBuffer codedBuffer
+			for _, v := range sl {
+				if err := marshalFieldValue(fd, v, &packedBuffer, deterministic); err != nil {
+					return err
+				}
+			}
+			if err := b.encodeTagAndWireType(tag, proto.WireBytes); err != nil {
+				return err
+			}
+			return b.encodeRawBytes(packedBuffer.buf)
+		} else {
+			// non-packed repeated field
+			for _, v := range sl {
+				if err := marshalFieldElement(tag, fd, v, b, deterministic); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+	} else {
+		return marshalFieldElement(tag, fd, val, b, deterministic)
+	}
+}
+
+func isPacked(fd *desc.FieldDescriptor) bool {
+	opts := fd.AsFieldDescriptorProto().GetOptions()
+	// if set, use that value
+	if opts != nil && opts.Packed != nil {
+		return opts.GetPacked()
+	}
+	// if unset: proto2 defaults to false, proto3 to true
+	return fd.GetFile().IsProto3()
+}
+
+// sortable is used to sort map keys. Values will be integers (int32, int64, uint32, and uint64),
+// bools, or strings.
+type sortable []interface{}
+
+func (s sortable) Len() int {
+	return len(s)
+}
+
+func (s sortable) Less(i, j int) bool {
+	vi := s[i]
+	vj := s[j]
+	switch reflect.TypeOf(vi).Kind() {
+	case reflect.Int32:
+		return vi.(int32) < vj.(int32)
+	case reflect.Int64:
+		return vi.(int64) < vj.(int64)
+	case reflect.Uint32:
+		return vi.(uint32) < vj.(uint32)
+	case reflect.Uint64:
+		return vi.(uint64) < vj.(uint64)
+	case reflect.String:
+		return vi.(string) < vj.(string)
+	case reflect.Bool:
+		return vi.(bool) && !vj.(bool)
+	default:
+		panic(fmt.Sprintf("cannot compare keys of type %v", reflect.TypeOf(vi)))
+	}
+}
+
+func (s sortable) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func marshalFieldElement(tag int32, fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
+	wt, err := getWireType(fd.GetType())
+	if err != nil {
+		return err
+	}
+	if err := b.encodeTagAndWireType(tag, wt); err != nil {
+		return err
+	}
+	if err := marshalFieldValue(fd, val, b, deterministic); err != nil {
+		return err
+	}
+	if wt == proto.WireStartGroup {
+		return b.encodeTagAndWireType(tag, proto.WireEndGroup)
+	}
+	return nil
+}
+
+func marshalFieldValue(fd *desc.FieldDescriptor, val interface{}, b *codedBuffer, deterministic bool) error {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		v := val.(bool)
+		if v {
+			return b.encodeVarint(1)
+		} else {
+			return b.encodeVarint(0)
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_INT32:
+		v := val.(int32)
+		return b.encodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		v := val.(int32)
+		return b.encodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		v := val.(int32)
+		return b.encodeVarint(encodeZigZag32(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		v := val.(uint32)
+		return b.encodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		v := val.(uint32)
+		return b.encodeFixed32(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		v := val.(int64)
+		return b.encodeVarint(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		v := val.(int64)
+		return b.encodeFixed64(uint64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		v := val.(int64)
+		return b.encodeVarint(encodeZigZag64(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		v := val.(uint64)
+		return b.encodeVarint(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		v := val.(uint64)
+		return b.encodeFixed64(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		v := val.(float64)
+		return b.encodeFixed64(math.Float64bits(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		v := val.(float32)
+		return b.encodeFixed32(uint64(math.Float32bits(v)))
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		v := val.([]byte)
+		return b.encodeRawBytes(v)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		v := val.(string)
+		return b.encodeRawBytes(([]byte)(v))
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		m := val.(proto.Message)
+		if bytes, err := proto.Marshal(m); err != nil {
+			return err
+		} else {
+			return b.encodeRawBytes(bytes)
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		// just append the nested message to this buffer
+		dm, ok := val.(*Message)
+		if ok {
+			return dm.marshal(b, deterministic)
+		} else {
+			m := val.(proto.Message)
+			return b.encodeMessage(m)
+		}
+		// whosoever writeth start-group tag (e.g. caller) is responsible for writing end-group tag
+
+	default:
+		return fmt.Errorf("unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func getWireType(t descriptor.FieldDescriptorProto_Type) (int8, error) {
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return proto.WireVarint, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return proto.WireFixed32, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return proto.WireFixed64, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES,
+		descriptor.FieldDescriptorProto_TYPE_STRING,
+		descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return proto.WireBytes, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return proto.WireStartGroup, nil
+
+	default:
+		return 0, proto.ErrInternalBadWireType
+	}
+}
+
+// Unmarshal de-serializes the message that is present in the given bytes into
+// this message. It first resets the current message. It returns an error if the
+// given bytes do not contain a valid encoding of this message type.
+func (m *Message) Unmarshal(b []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMerge(b); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMerge de-serializes the message that is present in the given bytes
+// into this message. Unlike Unmarshal, it does not first reset the message,
+// instead merging the data in the given bytes into the existing data in this
+// message.
+func (m *Message) UnmarshalMerge(b []byte) error {
+	return m.unmarshal(newCodedBuffer(b), false)
+}
+
+func (m *Message) unmarshal(buf *codedBuffer, isGroup bool) error {
+	for !buf.eof() {
+		tagNumber, wireType, err := buf.decodeTagAndWireType()
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireEndGroup {
+			if isGroup {
+				// finished parsing group
+				return nil
+			} else {
+				return proto.ErrInternalBadWireType
+			}
+		}
+		fd := m.FindFieldDescriptor(tagNumber)
+		if fd == nil {
+			err := m.unmarshalUnknownField(tagNumber, wireType, buf)
+			if err != nil {
+				return err
+			}
+		} else {
+			err := m.unmarshalKnownField(fd, wireType, buf)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	if isGroup {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+
+func unmarshalSimpleField(fd *desc.FieldDescriptor, v uint64) (interface{}, error) {
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return v != 0, nil
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if v > math.MaxUint32 {
+			return nil, NumericOverflowError
+		}
+		return uint32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		s := int64(v)
+		if s > math.MaxInt32 || s < math.MinInt32 {
+			return nil, NumericOverflowError
+		}
+		return int32(s), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if v > math.MaxUint32 {
+			return nil, NumericOverflowError
+		}
+		return int32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		if v > math.MaxUint32 {
+			return nil, NumericOverflowError
+		}
+		return decodeZigZag32(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return v, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return int64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return decodeZigZag64(v), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if v > math.MaxUint32 {
+			return nil, NumericOverflowError
+		}
+		return math.Float32frombits(uint32(v)), nil
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return math.Float64frombits(v), nil
+
+	default:
+		// bytes, string, message, and group cannot be represented as a simple numeric value
+		return nil, fmt.Errorf("bad input; field %s requires length-delimited wire type", fd.GetFullyQualifiedName())
+	}
+}
+
+func unmarshalLengthDelimitedField(fd *desc.FieldDescriptor, bytes []byte, mf *MessageFactory) (interface{}, error) {
+	switch {
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return bytes, nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING:
+		return string(bytes), nil
+
+	case fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE ||
+		fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP:
+		msg := mf.NewMessage(fd.GetMessageType())
+		err := proto.Unmarshal(bytes, msg)
+		if err != nil {
+			return nil, err
+		} else {
+			return msg, nil
+		}
+
+	default:
+		// even if the field is not repeated or not packed, we still parse it as such for
+		// backwards compatibility (e.g. the message we are de-serializing could have been both
+		// repeated and packed at the time of serialization)
+		packedBuf := newCodedBuffer(bytes)
+		var slice []interface{}
+		var val interface{}
+		for !packedBuf.eof() {
+			var v uint64
+			var err error
+			if varintTypes[fd.GetType()] {
+				v, err = packedBuf.decodeVarint()
+			} else if fixed32Types[fd.GetType()] {
+				v, err = packedBuf.decodeFixed32()
+			} else if fixed64Types[fd.GetType()] {
+				v, err = packedBuf.decodeFixed64()
+			} else {
+				return nil, fmt.Errorf("bad input; cannot parse length-delimited wire type for field %s", fd.GetFullyQualifiedName())
+			}
+			if err != nil {
+				return nil, err
+			}
+			val, err = unmarshalSimpleField(fd, v)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() {
+				slice = append(slice, val)
+			}
+		}
+		if fd.IsRepeated() {
+			return slice, nil
+		} else {
+			// if not a repeated field, last value wins
+			return val, nil
+		}
+	}
+}
+
+func (m *Message) unmarshalKnownField(fd *desc.FieldDescriptor, encoding int8, b *codedBuffer) error {
+	var val interface{}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		var num uint64
+		num, err = b.decodeFixed32()
+		if err == nil {
+			val, err = unmarshalSimpleField(fd, num)
+		}
+	case proto.WireFixed64:
+		var num uint64
+		num, err = b.decodeFixed64()
+		if err == nil {
+			val, err = unmarshalSimpleField(fd, num)
+		}
+	case proto.WireVarint:
+		var num uint64
+		num, err = b.decodeVarint()
+		if err == nil {
+			val, err = unmarshalSimpleField(fd, num)
+		}
+
+	case proto.WireBytes:
+		if fd.GetType() == descriptor.FieldDescriptorProto_TYPE_BYTES {
+			val, err = b.decodeRawBytes(true) // defensive copy
+		} else if fd.GetType() == descriptor.FieldDescriptorProto_TYPE_STRING {
+			var raw []byte
+			raw, err = b.decodeRawBytes(true) // defensive copy
+			if err == nil {
+				val = string(raw)
+			}
+		} else {
+			var raw []byte
+			raw, err = b.decodeRawBytes(false)
+			if err == nil {
+				val, err = unmarshalLengthDelimitedField(fd, raw, m.mf)
+			}
+		}
+
+	case proto.WireStartGroup:
+		if fd.GetMessageType() == nil {
+			return fmt.Errorf("cannot parse field %s from group-encoded wire type", fd.GetFullyQualifiedName())
+		}
+		msg := m.mf.NewMessage(fd.GetMessageType())
+		if dm, ok := msg.(*Message); ok {
+			err = dm.unmarshal(b, true)
+			if err == nil {
+				val = dm
+			}
+		} else {
+			var groupEnd, dataEnd int
+			groupEnd, dataEnd, err = skipGroup(b)
+			if err == nil {
+				err = proto.Unmarshal(b.buf[b.index:dataEnd], msg)
+				if err == nil {
+					val = msg
+				}
+				b.index = groupEnd
+			}
+		}
+
+	default:
+		return proto.ErrInternalBadWireType
+	}
+	if err != nil {
+		return err
+	}
+
+	return mergeField(m, fd, val)
+}
+
+func (m *Message) unmarshalUnknownField(tagNumber int32, encoding int8, b *codedBuffer) error {
+	u := UnknownField{Encoding: encoding}
+	var err error
+	switch encoding {
+	case proto.WireFixed32:
+		u.Value, err = b.decodeFixed32()
+	case proto.WireFixed64:
+		u.Value, err = b.decodeFixed64()
+	case proto.WireVarint:
+		u.Value, err = b.decodeVarint()
+	case proto.WireBytes:
+		u.Contents, err = b.decodeRawBytes(true)
+	case proto.WireStartGroup:
+		var groupEnd, dataEnd int
+		groupEnd, dataEnd, err = skipGroup(b)
+		if err == nil {
+			u.Contents = make([]byte, dataEnd-b.index)
+			copy(u.Contents, b.buf[b.index:])
+			b.index = groupEnd
+		}
+	default:
+		err = proto.ErrInternalBadWireType
+	}
+	if err != nil {
+		return err
+	}
+	if m.unknownFields == nil {
+		m.unknownFields = map[int32][]UnknownField{}
+	}
+	m.unknownFields[tagNumber] = append(m.unknownFields[tagNumber], u)
+	return nil
+}
+
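+// skipGroup scans past a group-encoded field whose start-group tag has already
+// been consumed. It returns the index just past the matching end-group tag and
+// the index at which that end-group tag begins (i.e. the end of the group's
+// data). The buffer's read index is restored to its starting position before
+// returning.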
+func skipGroup(b *codedBuffer) (int, int, error) {
+	bs := b.buf
+	start := b.index
+	defer func() {
+		b.index = start
+	}()
+	for {
+		fieldStart := b.index
+		// read a field tag
+		_, wireType, err := b.decodeTagAndWireType()
+		if err != nil {
+			return 0, 0, err
+		}
+		// skip past the field's data
+		switch wireType {
+		case proto.WireFixed32:
+			if !b.skip(4) {
+				return 0, 0, io.ErrUnexpectedEOF
+			}
+		case proto.WireFixed64:
+			if !b.skip(8) {
+				return 0, 0, io.ErrUnexpectedEOF
+			}
+		case proto.WireVarint:
+			// skip varint by finding last byte (has high bit unset)
+			i := b.index
+			for {
+				if i >= len(bs) {
+					return 0, 0, io.ErrUnexpectedEOF
+				}
+				if bs[i]&0x80 == 0 {
+					break
+				}
+				i++
+			}
+			b.index = i + 1
+		case proto.WireBytes:
+			l, err := b.decodeVarint()
+			if err != nil {
+				return 0, 0, err
+			}
+			if !b.skip(int(l)) {
+				return 0, 0, io.ErrUnexpectedEOF
+			}
+		case proto.WireStartGroup:
+			endIndex, _, err := skipGroup(b)
+			if err != nil {
+				return 0, 0, err
+			}
+			b.index = endIndex
+		case proto.WireEndGroup:
+			return b.index, fieldStart, nil
+		default:
+			return 0, 0, proto.ErrInternalBadWireType
+		}
+	}
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/codec.go b/vendor/github.com/jhump/protoreflect/dynamic/codec.go
new file mode 100644
index 0000000..9d70ab7
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/codec.go
@@ -0,0 +1,350 @@
+package dynamic
+
+// A reader/writer type that assists with encoding and decoding protobuf's binary representation.
+// This code is largely a fork of proto.Buffer, which cannot be used because it has no exported
+// field or method that provides access to its underlying reader index.
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ErrOverflow is returned when an integer is too large to be represented.
+var ErrOverflow = errors.New("proto: integer overflow")
+
+type codedBuffer struct {
+	buf   []byte
+	index int
+}
+
+func newCodedBuffer(buf []byte) *codedBuffer {
+	return &codedBuffer{buf: buf}
+}
+
+func (cb *codedBuffer) reset() {
+	cb.buf = []byte(nil)
+	cb.index = 0
+}
+
+func (cb *codedBuffer) eof() bool {
+	return cb.index >= len(cb.buf)
+}
+
+func (cb *codedBuffer) skip(count int) bool {
+	newIndex := cb.index + count
+	if newIndex > len(cb.buf) {
+		return false
+	}
+	cb.index = newIndex
+	return true
+}
+
+func (cb *codedBuffer) decodeVarintSlow() (x uint64, err error) {
+	i := cb.index
+	l := len(cb.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := cb.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			cb.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = ErrOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *codedBuffer) decodeVarint() (uint64, error) {
+	i := cb.index
+	buf := cb.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		cb.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return cb.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x := uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+	// x -= 0x80 << 63 // Always zero.
+
+	return 0, ErrOverflow
+
+done:
+	cb.index = i
+	return x, nil
+}
+
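+// decodeTagAndWireType reads a field key from the buffer and splits it into
+// the field's tag number and its wire type (the low 3 bits of the key).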
+func (cb *codedBuffer) decodeTagAndWireType() (tag int32, wireType int8, err error) {
+	var v uint64
+	v, err = cb.decodeVarint()
+	if err != nil {
+		return
+	}
+	// low 3 bits is the wire type
+	wireType = int8(v & 7)
+	// rest is int32 tag number
+	v = v >> 3
+	if v > math.MaxInt32 {
+		err = fmt.Errorf("tag number out of range: %d", v)
+		return
+	}
+	tag = int32(v)
+	return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *codedBuffer) decodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 8
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-8])
+	x |= uint64(cb.buf[i-7]) << 8
+	x |= uint64(cb.buf[i-6]) << 16
+	x |= uint64(cb.buf[i-5]) << 24
+	x |= uint64(cb.buf[i-4]) << 32
+	x |= uint64(cb.buf[i-3]) << 40
+	x |= uint64(cb.buf[i-2]) << 48
+	x |= uint64(cb.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *codedBuffer) decodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := cb.index + 4
+	if i < 0 || i > len(cb.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	cb.index = i
+
+	x = uint64(cb.buf[i-4])
+	x |= uint64(cb.buf[i-3]) << 8
+	x |= uint64(cb.buf[i-2]) << 16
+	x |= uint64(cb.buf[i-1]) << 24
+	return
+}
+
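+// decodeZigZag32 reverses zigzag encoding for 32-bit values, mapping
+// 0, 1, 2, 3, 4, ... back to 0, -1, 1, -2, 2, ...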
+func decodeZigZag32(v uint64) int32 {
+	return int32((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31))
+}
+
+func decodeZigZag64(v uint64) int64 {
+	return int64((v >> 1) ^ uint64((int64(v&1)<<63)>>63))
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *codedBuffer) decodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := cb.decodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := cb.index + nb
+	if end < cb.index || end > len(cb.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		buf = cb.buf[cb.index:end]
+		cb.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, cb.buf[cb.index:])
+	cb.index += nb
+	return
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (cb *codedBuffer) encodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		cb.buf = append(cb.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	cb.buf = append(cb.buf, uint8(x))
+	return nil
+}
+
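+// encodeTagAndWireType writes a field key, computed as (tag << 3) | wireType.
+// For example, tag 1 with wire type 0 (varint) is written as the single byte 0x08.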
+func (cb *codedBuffer) encodeTagAndWireType(tag int32, wireType int8) error {
+	v := uint64((int64(tag) << 3) | int64(wireType))
+	return cb.encodeVarint(v)
+}
+
+// TODO: decodeTagAndWireType
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (cb *codedBuffer) encodeFixed64(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (cb *codedBuffer) encodeFixed32(x uint64) error {
+	cb.buf = append(cb.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
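+// encodeZigZag64 zigzag-encodes a signed 64-bit value so that values of small
+// magnitude get small encodings: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...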
+func encodeZigZag64(v int64) uint64 {
+	return (uint64(v) << 1) ^ uint64(v>>63)
+}
+
+func encodeZigZag32(v int32) uint64 {
+	return uint64((uint32(v) << 1) ^ uint32((v >> 31)))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (cb *codedBuffer) encodeRawBytes(b []byte) error {
+	cb.encodeVarint(uint64(len(b)))
+	cb.buf = append(cb.buf, b...)
+	return nil
+}
+
+func (cb *codedBuffer) encodeMessage(pm proto.Message) error {
+	bytes, err := proto.Marshal(pm)
+	if err != nil {
+		return err
+	}
+	if len(bytes) == 0 {
+		return nil
+	}
+
+	if err := cb.encodeVarint(uint64(len(bytes))); err != nil {
+		return err
+	}
+	cb.buf = append(cb.buf, bytes...)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/doc.go b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
new file mode 100644
index 0000000..c329fcd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/doc.go
@@ -0,0 +1,163 @@
+// Package dynamic provides an implementation for a dynamic protobuf message.
+//
+// The dynamic message is essentially a message descriptor along with a map of
+// tag numbers to values. It has a broad API for interacting with the message,
+// including inspection and modification. Generally, most operations have two
+// forms: a regular method that panics on bad input or error and a "Try" form
+// of the method that will instead return an error.
+//
+// A dynamic message can optionally be constructed with a MessageFactory. The
+// MessageFactory has various registries that may be used by the dynamic message,
+// such as during de-serialization. The message factory is "inherited" by any
+// other dynamic messages created, such as nested messages that are created
+// during de-serialization. Similarly, any dynamic message created using
+// MessageFactory.NewMessage will be associated with that factory, which in turn
+// will be used to create other messages or parse extension fields during
+// de-serialization.
+//
+//
+// Field Types
+//
+// The types of values expected by setters and returned by getters are the
+// same as protoc generates for scalar fields. For repeated fields, there are
+// methods for getting and setting values at a particular index or for adding
+// an element. Similarly, for map fields, there are methods for getting and
+// setting values for a particular key.
+//
+// If you use GetField for a repeated field, it will return a copy of all
+// elements as a slice []interface{}. Similarly, using GetField for a map field
+// will return a copy of all mappings as a map[interface{}]interface{}. You can
+// also use SetField to supply an entire slice or map for repeated or map fields.
+// The slice need not be []interface{} but can actually be typed according to
+// the field's expected type. For example, a repeated uint64 field can be set
+// using a slice of type []uint64.
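+//
+// For illustration only (a sketch that is not part of the upstream docs), a
+// repeated field could be written and read like this, where dm is a *Message
+// and fd is the *desc.FieldDescriptor of a hypothetical repeated uint64 field:
+//
+//   // fd describes a hypothetical repeated uint64 field
+//   dm.SetField(fd, []uint64{1, 2, 3})      // a typed slice is accepted
+//   vals := dm.GetField(fd).([]interface{}) // a copy; elements are uint64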
+//
+// Descriptors for map fields describe them as repeated fields with a nested
+// message type. The nested message type is a special generated type that
+// represents a single mapping: key and value pair. The dynamic message has some
+// special affordances for this representation. For example, you can use
+// SetField to set a map field using a slice of these entry messages. Internally,
+// the slice of entries will be converted to an actual map. Similarly, you can
+// use AddRepeatedField with an entry message to add (or overwrite) a mapping.
+// However, you cannot use GetRepeatedField or SetRepeatedField to modify maps,
+// since those take numeric index arguments which are not relevant to maps
+// (since maps in Go have no defined ordering).
+//
+// When setting field values in dynamic messages, the type-checking is lenient
+// in that it accepts any named type with the right kind. So a string field can
+// be assigned to any type that is defined as a string. Enum fields require
+// int32 values (or any type that is defined as an int32).
+//
+// Unlike normal use of numeric values in Go, values will be automatically
+// widened when assigned. So, for example, an int64 field can be set using an
+// int32 value since it can be safely widened without truncation or loss of
+// precision. The same goes for uint32 values being converted to uint64 and
+// float32 being converted to float64. Narrowing conversions are not done,
+// however. Also, unsigned values will never be automatically converted to
+// signed (and vice versa), and floating point values will never be
+// automatically converted to integral values (and vice versa). Since the bit
+// width of int and uint fields is allowed to be platform dependent, but will
+// always be less than or equal to 64, they can only be used as values for
+// int64 and uint64 fields, respectively. They cannot be used to set int32 or
+// uint32 fields, which includes enum fields.
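+//
+// As a sketch (not part of the upstream docs), assuming fd describes a
+// hypothetical int64 field on the dynamic message dm:
+//
+//   dm.SetField(fd, int32(42))       // int32 is safely widened to int64
+//   v := dm.GetField(fd).(int64)     // the getter still returns int64
+//   err := dm.TrySetField(fd, 3.0)   // float64 is never converted: error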
+//
+// Fields whose type is a nested message can have values set to either other
+// dynamic messages or generated messages (e.g. pointers to structs generated by
+// protoc). Getting a value for such a field will return the actual type it is
+// set to (e.g. either a dynamic message or a generated message). If the value
+// is not set and the message uses proto2 syntax, the default message returned
+// will be whatever is returned by the dynamic message's MessageFactory (if the
+// dynamic message was not created with a factory, it will use the logic of the
+// zero value factory). In most typical cases, it will return a dynamic message,
+// but if the factory is configured with a KnownTypeRegistry, or if the field's
+// type is a well-known type, it will return a zero value generated message.
+//
+//
+// Unrecognized Fields
+//
+// Unrecognized fields are preserved by the dynamic message when unmarshaling
+// from the standard binary format. If the message's MessageFactory was
+// configured with an ExtensionRegistry, it will be used to identify and parse
+// extension fields for the message.
+//
+// Unrecognized fields can dynamically become recognized fields if the
+// application attempts to retrieve an unrecognized field's value using a
+// FieldDescriptor. In this case, the given FieldDescriptor is used to parse the
+// unknown field and move the parsed value into the message's set of known
+// fields. This behavior is most suited to the use of extensions, where an
+// ExtensionRegistry is not set up with all known extensions ahead of time. But
+// it can even happen for non-extension fields! Here's an example scenario where
+// a non-extension field can initially be unknown and become known:
+//
+//   1. A dynamic message is created with a descriptor, A, and then
+//      de-serialized from a stream of bytes. The stream includes an
+//      unrecognized tag T. The message will include tag T in its unrecognized
+//      field set.
+//   2. Another call site retrieves a newer descriptor, A', which includes a
+//      newly added field with tag T.
+//   3. That other call site then uses a FieldDescriptor to access the value of
+//      the new field. This will cause the dynamic message to parse the bytes
+//      for the unknown tag T and store them as a known field.
+//   4. Subsequent operations for tag T, including setting the field using only
+//      tag number or de-serializing a stream that includes tag T, will operate
+//      as if that tag were part of the original descriptor, A.
+//
+//
+// Compatibility
+//
+// In addition to implementing the proto.Message interface, the included
+// Message type also provides an XXX_MessageName() method, so it can work with
+// proto.MessageName. And it provides a Descriptor() method that behaves just
+// like the method of the same signature in messages generated by protoc.
+// Because of this, it is actually compatible with proto.Message in many (though
+// not all) contexts. In particular, it is compatible with proto.Marshal and
+// proto.Unmarshal for serializing and de-serializing messages.
+//
+// The dynamic message supports binary and text marshaling, using protobuf's
+// well-defined binary format and the same text format that protoc-generated
+// types use. It also supports JSON serialization/de-serialization by
+// implementing the json.Marshaler and json.Unmarshaler interfaces. And dynamic
+// messages can safely be used with the jsonpb package for JSON serialization
+// and de-serialization.
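+//
+// For illustration (a sketch that is not part of the upstream docs), a dynamic
+// message can be round-tripped through the binary format with the standard
+// proto package, where md is some *desc.MessageDescriptor:
+//
+//   dm := dynamic.NewMessage(md) // md: any message descriptor (assumed)
+//   data, err := proto.Marshal(dm)
+//   if err == nil {
+//       dm2 := dynamic.NewMessage(md)
+//       err = proto.Unmarshal(data, dm2)
+//   }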
+//
+// In addition to implementing the proto.Message interface and numerous related
+// methods, it also provides inter-op with generated messages via conversion.
+// The ConvertTo, ConvertFrom, MergeInto, and MergeFrom methods copy message
+// contents from a dynamic message to a generated message and vice versa.
+//
+// When copying from a generated message into a dynamic message, if the
+// generated message contains fields unknown to the dynamic message (e.g. not
+// present in the descriptor used to create the dynamic message), these fields
+// become known to the dynamic message (as per behavior described above in
+// "Unrecognized Fields"). If the generated message has unrecognized fields of
+// its own, including unrecognized extensions, they are preserved in the dynamic
+// message. It is possible that the dynamic message knows about fields that the
+// generated message did not, like if it has a different version of the
+// descriptor or its MessageFactory has an ExtensionRegistry that knows about
+// different extensions than were linked into the program. In this case, these
+// unrecognized fields in the generated message will be known fields in the
+// dynamic message.
+//
+// Similarly, when copying from a dynamic message into a generated message, if
+// the dynamic message has unrecognized fields they can be preserved in the
+// generated message (currently only for syntax proto2 since proto3 generated
+// messages do not preserve unrecognized fields). If the generated message knows
+// about fields that the dynamic message does not, these unrecognized fields may
+// become known fields in the generated message.
+//
+//
+// Registries
+//
+// This package also contains a couple of registries, for managing known types
+// and descriptors.
+//
+// The KnownTypeRegistry allows de-serialization of a dynamic message to use
+// generated message types, instead of dynamic messages, for some kinds of
+// nested message fields. This is particularly useful for working with proto
+// messages that have special encodings as JSON (e.g. the well-known types),
+// since the dynamic message does not try to handle these special cases in its
+// JSON marshaling facilities.
+//
+// The ExtensionRegistry allows for recognizing and parsing extension fields
+// (for proto2 messages).
+package dynamic
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
new file mode 100644
index 0000000..ac7e52f
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/dynamic_message.go
@@ -0,0 +1,2710 @@
+package dynamic
+
+import (
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ErrUnknownTagNumber is an error that is returned when an operation refers
+// to an unknown tag number.
+var ErrUnknownTagNumber = errors.New("unknown tag number")
+
+// UnknownTagNumberError is the same as ErrUnknownTagNumber.
+// Deprecated: use ErrUnknownTagNumber
+var UnknownTagNumberError = ErrUnknownTagNumber
+
+// ErrUnknownFieldName is an error that is returned when an operation refers
+// to an unknown field name.
+var ErrUnknownFieldName = errors.New("unknown field name")
+
+// UnknownFieldNameError is the same as ErrUnknownFieldName.
+// Deprecated: use ErrUnknownFieldName
+var UnknownFieldNameError = ErrUnknownFieldName
+
+// ErrFieldIsNotMap is an error that is returned when map-related operations
+// are attempted with fields that are not maps.
+var ErrFieldIsNotMap = errors.New("field is not a map type")
+
+// FieldIsNotMapError is the same as ErrFieldIsNotMap.
+// Deprecated: use ErrFieldIsNotMap
+var FieldIsNotMapError = ErrFieldIsNotMap
+
+// ErrFieldIsNotRepeated is an error that is returned when repeated field
+// operations are attempted with fields that are not repeated.
+var ErrFieldIsNotRepeated = errors.New("field is not repeated")
+
+// FieldIsNotRepeatedError is the same as ErrFieldIsNotRepeated.
+// Deprecated: use ErrFieldIsNotRepeated
+var FieldIsNotRepeatedError = ErrFieldIsNotRepeated
+
+// ErrIndexOutOfRange is an error that is returned when an invalid index is
+// provided when accessing a single element of a repeated field.
+var ErrIndexOutOfRange = errors.New("index is out of range")
+
+// IndexOutOfRangeError is the same as ErrIndexOutOfRange.
+// Deprecated: use ErrIndexOutOfRange
+var IndexOutOfRangeError = ErrIndexOutOfRange
+
+// ErrNumericOverflow is an error returned by operations that encounter a
+// numeric value that is too large, for example de-serializing a value into an
+// int32 field when the value is larger that can fit into a 32-bit value.
+var ErrNumericOverflow = errors.New("numeric value is out of range")
+
+// NumericOverflowError is the same as ErrNumericOverflow.
+// Deprecated: use ErrNumericOverflow
+var NumericOverflowError = ErrNumericOverflow
+
+var typeOfProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+var typeOfDynamicMessage = reflect.TypeOf((*Message)(nil))
+var typeOfBytes = reflect.TypeOf(([]byte)(nil))
+
+var varintTypes = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed32Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+var fixed64Types = map[descriptor.FieldDescriptorProto_Type]bool{}
+
+func init() {
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_BOOL] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_INT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_UINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT32] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_SINT64] = true
+	varintTypes[descriptor.FieldDescriptorProto_TYPE_ENUM] = true
+
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_SFIXED32] = true
+	fixed32Types[descriptor.FieldDescriptorProto_TYPE_FLOAT] = true
+
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_FIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_SFIXED64] = true
+	fixed64Types[descriptor.FieldDescriptorProto_TYPE_DOUBLE] = true
+}
+
+// Message is a dynamic protobuf message. Instead of a generated struct,
+// like most protobuf messages, this is a map of field number to values and
+// a message descriptor, which is used to validate the field values and
+// also to de-serialize messages (from the standard binary format, as well
+// as from the text format and from JSON).
+type Message struct {
+	md            *desc.MessageDescriptor
+	er            *ExtensionRegistry
+	mf            *MessageFactory
+	extraFields   map[int32]*desc.FieldDescriptor
+	values        map[int32]interface{}
+	unknownFields map[int32][]UnknownField
+}
+
+// UnknownField represents a field that was parsed from the binary wire
+// format for a message, but was not a recognized field number. Enough
+// information is preserved so that re-serializing the message won't lose
+// any of the unrecognized data.
+type UnknownField struct {
+	// Encoding indicates how the unknown field was encoded on the wire. If it
+	// is proto.WireBytes or proto.WireStartGroup then Contents will be set to
+	// the raw bytes. If it is proto.WireFixed32 then the data is in the least
+	// significant 32 bits of Value. Otherwise, the data is in all 64 bits of
+	// Value.
+	Encoding int8
+	Contents []byte
+	Value    uint64
+}
+
+// NewMessage creates a new dynamic message for the type represented by the given
+// message descriptor. During de-serialization, a default MessageFactory is used to
+// instantiate any nested message fields and no extension fields will be parsed. To
+// use a custom MessageFactory or ExtensionRegistry, use MessageFactory.NewMessage.
+func NewMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, nil)
+}
+
+// NewMessageWithExtensionRegistry creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// ExtensionRegistry is used to parse extension fields and nested messages will be
+// instantiated using dynamic.NewMessageFactoryWithExtensionRegistry(er).
+func NewMessageWithExtensionRegistry(md *desc.MessageDescriptor, er *ExtensionRegistry) *Message {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return NewMessageWithMessageFactory(md, mf)
+}
+
+// NewMessageWithMessageFactory creates a new dynamic message for the type
+// represented by the given message descriptor. During de-serialization, the given
+// MessageFactory is used to instantiate nested messages.
+func NewMessageWithMessageFactory(md *desc.MessageDescriptor, mf *MessageFactory) *Message {
+	var er *ExtensionRegistry
+	if mf != nil {
+		er = mf.er
+	}
+	return &Message{
+		md: md,
+		mf: mf,
+		er: er,
+	}
+}
+
+// AsDynamicMessage converts the given message to a dynamic message. If the
+// given message is dynamic, it is returned. Otherwise, a dynamic message is
+// created using NewMessage.
+func AsDynamicMessage(msg proto.Message) (*Message, error) {
+	return AsDynamicMessageWithMessageFactory(msg, nil)
+}
+
+// AsDynamicMessageWithExtensionRegistry converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithExtensionRegistry.
+func AsDynamicMessageWithExtensionRegistry(msg proto.Message, er *ExtensionRegistry) (*Message, error) {
+	mf := NewMessageFactoryWithExtensionRegistry(er)
+	return AsDynamicMessageWithMessageFactory(msg, mf)
+}
+
+// AsDynamicMessageWithMessageFactory converts the given message to a dynamic
+// message. If the given message is dynamic, it is returned. Otherwise, a
+// dynamic message is created using NewMessageWithMessageFactory.
+func AsDynamicMessageWithMessageFactory(msg proto.Message, mf *MessageFactory) (*Message, error) {
+	if dm, ok := msg.(*Message); ok {
+		return dm, nil
+	}
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return nil, err
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	err = dm.mergeFrom(msg)
+	if err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+// GetMessageDescriptor returns a descriptor for this message's type.
+func (m *Message) GetMessageDescriptor() *desc.MessageDescriptor {
+	return m.md
+}
+
+// GetKnownFields returns a slice of descriptors for all known fields. The
+// fields will not be in any defined order.
+func (m *Message) GetKnownFields() []*desc.FieldDescriptor {
+	if len(m.extraFields) == 0 {
+		return m.md.GetFields()
+	}
+	flds := make([]*desc.FieldDescriptor, len(m.md.GetFields()), len(m.md.GetFields())+len(m.extraFields))
+	copy(flds, m.md.GetFields())
+	for _, fld := range m.extraFields {
+		if !fld.IsExtension() {
+			flds = append(flds, fld)
+		}
+	}
+	return flds
+}
+
+// GetKnownExtensions returns a slice of descriptors for all extensions known by
+// the message's extension registry. The fields will not be in any defined order.
+func (m *Message) GetKnownExtensions() []*desc.FieldDescriptor {
+	if !m.md.IsExtendable() {
+		return nil
+	}
+	exts := m.er.AllExtensionsForType(m.md.GetFullyQualifiedName())
+	for _, fld := range m.extraFields {
+		if fld.IsExtension() {
+			exts = append(exts, fld)
+		}
+	}
+	return exts
+}
+
+// GetUnknownFields returns a slice of tag numbers for all unknown fields that
+// this message contains. The tags will not be in any defined order.
+func (m *Message) GetUnknownFields() []int32 {
+	flds := make([]int32, 0, len(m.unknownFields))
+	for tag := range m.unknownFields {
+		flds = append(flds, tag)
+	}
+	return flds
+}
+
+// Descriptor returns the serialized form of the file descriptor in which the
+// message was defined and a path to the message type therein. This mimics the
+// method of the same name on message types generated by protoc.
+func (m *Message) Descriptor() ([]byte, []int) {
+	// get encoded file descriptor
+	b, err := proto.Marshal(m.md.GetFile().AsProto())
+	if err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	var zippedBytes bytes.Buffer
+	w := gzip.NewWriter(&zippedBytes)
+	if _, err := w.Write(b); err != nil {
+		panic(fmt.Sprintf("failed to get encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+	if err := w.Close(); err != nil {
+		panic(fmt.Sprintf("failed to get an encoded descriptor for %s: %v", m.md.GetFile().GetName(), err))
+	}
+
+	// and path to message
+	path := []int{}
+	var d desc.Descriptor
+	name := m.md.GetFullyQualifiedName()
+	for d = m.md.GetParent(); d != nil; name, d = d.GetFullyQualifiedName(), d.GetParent() {
+		found := false
+		switch d := d.(type) {
+		case (*desc.FileDescriptor):
+			for i, md := range d.GetMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		case (*desc.MessageDescriptor):
+			for i, md := range d.GetNestedMessageTypes() {
+				if md.GetFullyQualifiedName() == name {
+					found = true
+					path = append(path, i)
+				}
+			}
+		}
+		if !found {
+			panic(fmt.Sprintf("failed to compute descriptor path for %s", m.md.GetFullyQualifiedName()))
+		}
+	}
+	// reverse the path
+	i := 0
+	j := len(path) - 1
+	for i < j {
+		path[i], path[j] = path[j], path[i]
+		i++
+		j--
+	}
+
+	return zippedBytes.Bytes(), path
+}
+
+// XXX_MessageName returns the fully qualified name of this message's type. This
+// allows dynamic messages to be used with proto.MessageName.
+func (m *Message) XXX_MessageName() string {
+	return m.md.GetFullyQualifiedName()
+}
+
+// FindFieldDescriptor returns a field descriptor for the given tag number. This
+// searches known fields in the descriptor, known fields discovered during calls
+// to GetField or SetField, and extension fields known by the message's extension
+// registry. It returns nil if the tag is unknown.
+func (m *Message) FindFieldDescriptor(tagNumber int32) *desc.FieldDescriptor {
+	fd := m.md.FindFieldByNumber(tagNumber)
+	if fd != nil {
+		return fd
+	}
+	fd = m.er.FindExtension(m.md.GetFullyQualifiedName(), tagNumber)
+	if fd != nil {
+		return fd
+	}
+	return m.extraFields[tagNumber]
+}
+
+// FindFieldDescriptorByName returns a field descriptor for the given field
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. It returns nil if the name is unknown. If the
+// given name refers to an extension, it should be fully qualified and may be
+// optionally enclosed in parentheses or brackets.
+func (m *Message) FindFieldDescriptorByName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetName() {
+			return fd
+		}
+	}
+
+	return nil
+}
+
+// FindFieldDescriptorByJSONName returns a field descriptor for the given JSON
+// name. This searches known fields in the descriptor, known fields discovered
+// during calls to GetField or SetField, and extension fields known by the
+// message's extension registry. If no field matches the given JSON name, it
+// will fall back to searching field names (e.g. FindFieldDescriptorByName). If
+// this also yields no match, nil is returned.
+func (m *Message) FindFieldDescriptorByJSONName(name string) *desc.FieldDescriptor {
+	if name == "" {
+		return nil
+	}
+	fd := m.md.FindFieldByJSONName(name)
+	if fd != nil {
+		return fd
+	}
+	mustBeExt := false
+	if name[0] == '(' {
+		if name[len(name)-1] != ')' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	} else if name[0] == '[' {
+		if name[len(name)-1] != ']' {
+			// malformed name
+			return nil
+		}
+		mustBeExt = true
+		name = name[1 : len(name)-1]
+	}
+	fd = m.er.FindExtensionByJSONName(m.md.GetFullyQualifiedName(), name)
+	if fd != nil {
+		return fd
+	}
+	for _, fd := range m.extraFields {
+		if fd.IsExtension() && name == fd.GetFullyQualifiedJSONName() {
+			return fd
+		} else if !mustBeExt && !fd.IsExtension() && name == fd.GetJSONName() {
+			return fd
+		}
+	}
+
+	// try non-JSON names
+	return m.FindFieldDescriptorByName(name)
+}
+
+func (m *Message) checkField(fd *desc.FieldDescriptor) error {
+	return checkField(fd, m.md)
+}
+
+func checkField(fd *desc.FieldDescriptor, md *desc.MessageDescriptor) error {
+	if fd.GetOwner().GetFullyQualifiedName() != md.GetFullyQualifiedName() {
+		return fmt.Errorf("given field, %s, is for wrong message type: %s; expecting %s", fd.GetName(), fd.GetOwner().GetFullyQualifiedName(), md.GetFullyQualifiedName())
+	}
+	if fd.IsExtension() && !md.IsExtension(fd.GetNumber()) {
+		return fmt.Errorf("given field, %s, is an extension but is not in message extension range: %v", fd.GetFullyQualifiedName(), md.GetExtensionRanges())
+	}
+	return nil
+}
+
+// GetField returns the value for the given field descriptor. It panics if an
+// error is encountered. See TryGetField.
+func (m *Message) GetField(fd *desc.FieldDescriptor) interface{} {
+	if v, err := m.TryGetField(fd); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetField returns the value for the given field descriptor. An error is
+// returned if the given field descriptor does not belong to the right message
+// type.
+//
+// The Go type of the returned value, for scalar fields, is the same as protoc
+// would generate for the field (in a non-dynamic message). The table below
+// lists the scalar types and the corresponding Go types.
+//  +-------------------------+-----------+
+//  |       Declared Type     |  Go Type  |
+//  +-------------------------+-----------+
+//  | int32, sint32, sfixed32 | int32     |
+//  | int64, sint64, sfixed64 | int64     |
+//  | uint32, fixed32         | uint32    |
+//  | uint64, fixed64         | uint64    |
+//  | float                   | float32   |
+//  | double                  | float64   |
+//  | bool                    | bool      |
+//  | string                  | string    |
+//  | bytes                   | []byte    |
+//  +-------------------------+-----------+
+//
+// Values for enum fields will always be int32 values. You can use the enum
+// descriptor associated with the field to lookup value names with those values.
+// Values for message type fields may be an instance of the generated type *or*
+// may be another *dynamic.Message that represents the type.
+//
+// If the given field is a map field, the returned type will be
+// map[interface{}]interface{}. The actual concrete types of keys and values is
+// as described above. If the given field is a (non-map) repeated field, the
+// returned type is always []interface{}; the type of the actual elements is as
+// described above.
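+//
+// For illustration only (a sketch, not part of the original docs), reading a
+// hypothetical repeated string field via its descriptor fd:
+//
+//  v, err := m.TryGetField(fd) // fd: hypothetical repeated string field
+//  if err == nil {
+//      for _, e := range v.([]interface{}) {
+//          _ = e.(string) // element type follows the table above
+//      }
+//  }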
+//
+// If this message has no value for the given field, its default value is
+// returned. If the message is defined in a file with "proto3" syntax, the
+// default is always the zero value for the field. The default value for map and
+// repeated fields is a nil map or slice (respectively). For fields whose type
+// is a message, the default value is an empty message for "proto2" syntax or a
+// nil message for "proto3" syntax. Note that in the latter case, a non-nil
+// interface with a nil pointer is returned, not a nil interface. Also note that
+// whether the returned value is an empty message or nil depends on whether *this*
+// message was defined as "proto3" syntax, not the message type referred to by
+// the field's type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be returned, or an error will
+// be returned if the unknown value cannot be parsed according to the field
+// descriptor's type information.
+func (m *Message) TryGetField(fd *desc.FieldDescriptor) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByName returns the value for the field with the given name. It panics
+// if an error is encountered. See TryGetFieldByName.
+func (m *Message) GetFieldByName(name string) interface{} {
+	if v, err := m.TryGetFieldByName(name); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByName returns the value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByName(name string) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getField(fd)
+}
+
+// GetFieldByNumber returns the value for the field with the given tag number.
+// It panics if an error is encountered. See TryGetFieldByNumber.
+func (m *Message) GetFieldByNumber(tagNumber int) interface{} {
+	if v, err := m.TryGetFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetFieldByNumber returns the value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+//
+// If this message has no value for the given field, its default value is
+// returned. (See TryGetField for more info on types and default field values.)
+func (m *Message) TryGetFieldByNumber(tagNumber int) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getField(fd)
+}
+
+func (m *Message) getField(fd *desc.FieldDescriptor) (interface{}, error) {
+	return m.doGetField(fd, false)
+}
+
+func (m *Message) doGetField(fd *desc.FieldDescriptor, nilIfAbsent bool) (interface{}, error) {
+	res := m.values[fd.GetNumber()]
+	if res == nil {
+		var err error
+		if res, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		}
+		if res == nil {
+			if nilIfAbsent {
+				return nil, nil
+			} else {
+				def := fd.GetDefaultValue()
+				if def != nil {
+					return def, nil
+				}
+				// GetDefaultValue only returns nil for message types
+				md := fd.GetMessageType()
+				if md.IsProto3() {
+					// try to return a proper nil pointer
+					msgType := proto.MessageType(md.GetFullyQualifiedName())
+					if msgType != nil && msgType.Implements(typeOfProtoMessage) {
+						return reflect.Zero(msgType).Interface().(proto.Message), nil
+					}
+					// fallback to nil dynamic message pointer
+					return (*Message)(nil), nil
+				} else {
+					// for proto2, return default instance of message
+					return m.mf.NewMessage(md), nil
+				}
+			}
+		}
+	}
+	rt := reflect.TypeOf(res)
+	if rt.Kind() == reflect.Map {
+		// make defensive copies to prevent caller from storing illegal keys and values
+		m := res.(map[interface{}]interface{})
+		res := map[interface{}]interface{}{}
+		for k, v := range m {
+			res[k] = v
+		}
+		return res, nil
+	} else if rt.Kind() == reflect.Slice && rt != typeOfBytes {
+		// make defensive copies to prevent caller from storing illegal elements
+		sl := res.([]interface{})
+		res := make([]interface{}, len(sl))
+		copy(res, sl)
+		return res, nil
+	}
+	return res, nil
+}
+
+// HasField returns true if this message has a value for the given field. If the
+// given field is not valid (e.g. belongs to a different message type), false is
+// returned. If this message is defined in a file with "proto3" syntax, this
+// will return false even if a field was explicitly assigned its zero value (the
+// zero values for a field are intentionally indistinguishable from absent).
+func (m *Message) HasField(fd *desc.FieldDescriptor) bool {
+	if err := m.checkField(fd); err != nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldName returns true if this message has a value for a field with the
+// given name. If the given name is unknown, this returns false.
+func (m *Message) HasFieldName(name string) bool {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return false
+	}
+	return m.HasFieldNumber(int(fd.GetNumber()))
+}
+
+// HasFieldNumber returns true if this message has a value for a field with the
+// given tag number. If the given tag is unknown, this returns false.
+func (m *Message) HasFieldNumber(tagNumber int) bool {
+	if _, ok := m.values[int32(tagNumber)]; ok {
+		return true
+	}
+	_, ok := m.unknownFields[int32(tagNumber)]
+	return ok
+}
+
+// SetField sets the value for the given field descriptor to the given value. It
+// panics if an error is encountered. See TrySetField.
+func (m *Message) SetField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TrySetField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetField sets the value for the given field descriptor to the given value.
+// An error is returned if the given field descriptor does not belong to the
+// right message type or if the given value is not a correct/compatible type for
+// the given field.
+//
+// The Go type expected for a field is the same as TryGetField would return for
+// the field. So message values can be supplied as either the correct generated
+// message type or as a *dynamic.Message.
+//
+// Since it is cumbersome to work with dynamic messages, some concessions are
+// made to simplify usage regarding types:
+//
+//  1. If a numeric type is provided that can be converted *without loss or
+//     overflow*, it is accepted. This allows for setting int64 fields using int
+//     or int32 values. Similarly for uint64 with uint and uint32 values and for
+//     float64 fields with float32 values.
+//  2. The value can be a named type, as long as its underlying type is correct.
+//  3. Map and repeated fields can be set using any kind of concrete map or
+//     slice type, as long as the values within are all of the correct type. So
+//     a field defined as a 'map<string, int32>' can be set using a
+//     map[string]int32, a map[string]interface{}, or even a
+//     map[interface{}]interface{} (see the sketch after this list).
+//  4. Finally, dynamic code that chooses not to treat maps as a special case
+//     will find that it can set map fields using a slice where each element
+//     is a message that matches the implicit map-entry field message type.
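+//
+// For illustration only (a sketch that is not part of the original docs), the
+// map concession in item 3 could look like this, where fd is the descriptor of
+// a hypothetical field declared as map<string, int32>:
+//
+//     // fd: hypothetical map<string, int32> field
+//     if err := m.TrySetField(fd, map[string]int32{"a": 1}); err != nil {
+//         // value was not compatible with the field's type
+//     }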
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message held an
+// unknown value for this field's tag, it is cleared and replaced by the given
+// known value.
+func (m *Message) TrySetField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByName sets the value for the field with the given name to the given
+// value. It panics if an error is encountered. See TrySetFieldByName.
+func (m *Message) SetFieldByName(name string, val interface{}) {
+	if err := m.TrySetFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByName sets the value for the field with the given name to the
+// given value. An error is returned if the given name is unknown or if the
+// given value has an incorrect type. If the given name refers to an extension
+// field, it should be fully qualified and optionally enclosed in parentheses or
+// brackets.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setField(fd, val)
+}
+
+// SetFieldByNumber sets the value for the field with the given tag number to
+// the given value. It panics if an error is encountered. See
+// TrySetFieldByNumber.
+func (m *Message) SetFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TrySetFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetFieldByNumber sets the value for the field with the given tag number to
+// the given value. An error is returned if the given tag is unknown or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setField(fd, val)
+}
+
+func (m *Message) setField(fd *desc.FieldDescriptor, val interface{}) error {
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+	m.internalSetField(fd, val)
+	return nil
+}
+
+func (m *Message) internalSetField(fd *desc.FieldDescriptor, val interface{}) {
+	if fd.IsRepeated() {
+		// Unset fields and zero-length fields are indistinguishable, in both
+		// proto2 and proto3 syntax
+		if reflect.ValueOf(val).Len() == 0 {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	} else if m.md.IsProto3() && fd.GetOneOf() == nil {
+		// proto3 considers fields that are set to their zero value as unset
+		// (we already handled repeated fields above)
+		var equal bool
+		if b, ok := val.([]byte); ok {
+			// can't compare slices, so we have to special-case []byte values
+			equal = ok && bytes.Equal(b, fd.GetDefaultValue().([]byte))
+		} else {
+			defVal := fd.GetDefaultValue()
+			equal = defVal == val
+			if !equal && defVal == nil {
+				// above just checks if value is the nil interface,
+				// but we should also test if the given value is a
+				// nil pointer
+				rv := reflect.ValueOf(val)
+				if rv.Kind() == reflect.Ptr && rv.IsNil() {
+					equal = true
+				}
+			}
+		}
+		if equal {
+			if m.values != nil {
+				delete(m.values, fd.GetNumber())
+			}
+			return
+		}
+	}
+	if m.values == nil {
+		m.values = map[int32]interface{}{}
+	}
+	m.values[fd.GetNumber()] = val
+	// if this field is part of a one-of, make sure all other one-of choices are cleared
+	od := fd.GetOneOf()
+	if od != nil {
+		for _, other := range od.GetChoices() {
+			if other.GetNumber() != fd.GetNumber() {
+				delete(m.values, other.GetNumber())
+			}
+		}
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+func (m *Message) addField(fd *desc.FieldDescriptor) {
+	if m.extraFields == nil {
+		m.extraFields = map[int32]*desc.FieldDescriptor{}
+	}
+	m.extraFields[fd.GetNumber()] = fd
+}
+
+// ClearField removes any value for the given field. It panics if an error is
+// encountered. See TryClearField.
+func (m *Message) ClearField(fd *desc.FieldDescriptor) {
+	if err := m.TryClearField(fd); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearField removes any value for the given field. An error is returned if
+// the given field descriptor does not belong to the right message type.
+func (m *Message) TryClearField(fd *desc.FieldDescriptor) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByName removes any value for the field with the given name. It
+// panics if an error is encountered. See TryClearFieldByName.
+func (m *Message) ClearFieldByName(name string) {
+	if err := m.TryClearFieldByName(name); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByName removes any value for the field with the given name. An
+// error is returned if the given name is unknown. If the given name refers to
+// an extension field, it should be fully qualified and optionally enclosed in
+// parentheses or brackets.
+func (m *Message) TryClearFieldByName(name string) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+// ClearFieldByNumber removes any value for the field with the given tag number.
+// It panics if an error is encountered. See TryClearFieldByNumber.
+func (m *Message) ClearFieldByNumber(tagNumber int) {
+	if err := m.TryClearFieldByNumber(tagNumber); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearFieldByNumber removes any value for the field with the given tag
+// number. An error is returned if the given tag is unknown.
+func (m *Message) TryClearFieldByNumber(tagNumber int) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	m.clearField(fd)
+	return nil
+}
+
+func (m *Message) clearField(fd *desc.FieldDescriptor) {
+	// clear value
+	if m.values != nil {
+		delete(m.values, fd.GetNumber())
+	}
+	// also clear any unknown fields
+	if m.unknownFields != nil {
+		delete(m.unknownFields, fd.GetNumber())
+	}
+	// and add this field if it was previously unknown
+	if existing := m.FindFieldDescriptor(fd.GetNumber()); existing == nil {
+		m.addField(fd)
+	}
+}
+
+// GetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. It panics if an error is encountered. See
+// TryGetOneOfField.
+func (m *Message) GetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}) {
+	if fd, val, err := m.TryGetOneOfField(od); err != nil {
+		panic(err.Error())
+	} else {
+		return fd, val
+	}
+}
+
+// TryGetOneOfField returns which of the given one-of's fields is set and the
+// corresponding value. An error is returned if the given one-of belongs to the
+// wrong message type. If the given one-of has no field set, this method will
+// return nil, nil.
+//
+// The type of the value, if one is set, is the same as would be returned by
+// TryGetField using the returned field descriptor.
+//
+// Like with TryGetField, if the given one-of contains any fields that are not
+// known (e.g. not present in this message's descriptor), they will become known
+// and any unknown value will be parsed (and become a known value on success).
+func (m *Message) TryGetOneOfField(od *desc.OneOfDescriptor) (*desc.FieldDescriptor, interface{}, error) {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return nil, nil, fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		val, err := m.doGetField(fd, true)
+		if err != nil {
+			return nil, nil, err
+		}
+		if val != nil {
+			return fd, val, nil
+		}
+	}
+	return nil, nil, nil
+}
+
+// ClearOneOfField removes any value for any of the given one-of's fields. It
+// panics if an error is encountered. See TryClearOneOfField.
+func (m *Message) ClearOneOfField(od *desc.OneOfDescriptor) {
+	if err := m.TryClearOneOfField(od); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryClearOneOfField removes any value for any of the given one-of's fields. An
+// error is returned if the given one-of descriptor does not belong to the right
+// message type.
+func (m *Message) TryClearOneOfField(od *desc.OneOfDescriptor) error {
+	if od.GetOwner().GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given one-of, %s, is for wrong message type: %s; expecting %s", od.GetName(), od.GetOwner().GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+	}
+	for _, fd := range od.GetChoices() {
+		m.clearField(fd)
+	}
+	return nil
+}
+
+// GetMapField returns the value for the given map field descriptor and given
+// key. It panics if an error is encountered. See TryGetMapField.
+func (m *Message) GetMapField(fd *desc.FieldDescriptor, key interface{}) interface{} {
+	if v, err := m.TryGetMapField(fd, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapField returns the value for the given map field descriptor and given
+// key. An error is returned if the given field descriptor does not belong to
+// the right message type or if it is not a map field.
+//
+// If the map field does not contain the requested key, this method returns
+// nil, nil. The Go type of the value returned mirrors the type that protoc
+// would generate for the field. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The parsed value will be searched for the requested
+// key and any value returned. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryGetMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByName returns the value for the map field with the given name and
+// given key. It panics if an error is encountered. See TryGetMapFieldByName.
+func (m *Message) GetMapFieldByName(name string, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByName returns the value for the map field with the given name
+// and given key. An error is returned if the given name is unknown or if it
+// names a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByName(name string, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getMapField(fd, key)
+}
+
+// GetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. It panics if an error is encountered. See
+// TryGetMapFieldByNumber.
+func (m *Message) GetMapFieldByNumber(tagNumber int, key interface{}) interface{} {
+	if v, err := m.TryGetMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetMapFieldByNumber returns the value for the map field with the given tag
+// number and given key. An error is returned if the given tag is unknown or if
+// it indicates a field that is not a map field.
+//
+// If this message has no value for the given field or the value has no value
+// for the requested key, then this method returns nil, nil.
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetMapFieldByNumber(tagNumber int, key interface{}) (interface{}, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getMapField(fd, key)
+}
+
+func (m *Message) getMapField(fd *desc.FieldDescriptor, key interface{}) (interface{}, error) {
+	if !fd.IsMap() {
+		return nil, FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return nil, err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if mp == nil {
+			return nil, nil
+		}
+	}
+	return mp.(map[interface{}]interface{})[ki], nil
+}
+
+// ForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. It stops iteration if the function
+// returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntry.
+func (m *Message) ForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntry(fd, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntry executes the given function for each entry in the map
+// value for the given field descriptor. An error is returned if the given field
+// descriptor does not belong to the right message type or if it is not a map
+// field.
+//
+// Iteration ends either when all entries have been examined or when the given
+// function returns false. So the function is expected to return true for normal
+// iteration and false to break out. If this message has no value for the given
+// field, it returns without invoking the given function.
+//
+// The Go type of the key and value supplied to the function mirrors the type
+// that protoc would generate for the field. (See TryGetField for more details
+// on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The entries of the parsed value will then be
+// supplied to the given function. An error will be returned if the unknown value
+// cannot be parsed according to the field descriptor's type information.
+func (m *Message) TryForEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByName executes the given function for each entry in the
+// map value for the field with the given name. It stops iteration if the
+// function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByName.
+func (m *Message) ForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByName(name, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByName executes the given function for each entry in
+// the map value for the field with the given name. It stops iteration if the
+// function returns false. An error is returned if the given name is unknown or
+// if it names a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByName(name string, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+// ForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. It panics if an error is encountered. See
+// TryForEachMapFieldEntryByNumber.
+func (m *Message) ForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) {
+	if err := m.TryForEachMapFieldEntryByNumber(tagNumber, fn); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryForEachMapFieldEntryByNumber executes the given function for each entry in
+// the map value for the field with the given tag number. It stops iteration if
+// the function returns false. An error is returned if the given tag is unknown
+// or if it indicates a field that is not a map field.
+//
+// If this message has no value for the given field, it returns without ever
+// invoking the given function.
+//
+// (See TryGetField for more info on types supplied to the function.)
+func (m *Message) TryForEachMapFieldEntryByNumber(tagNumber int, fn func(key, val interface{}) bool) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.forEachMapFieldEntry(fd, fn)
+}
+
+func (m *Message) forEachMapFieldEntry(fd *desc.FieldDescriptor, fn func(key, val interface{}) bool) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		// assign to the outer mp (no shadowing) so a successfully parsed
+		// unknown value is actually iterated below
+		var err error
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	for k, v := range mp.(map[interface{}]interface{}) {
+		if !fn(k, v) {
+			break
+		}
+	}
+	return nil
+}
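+
+// Illustrative usage sketch (hypothetical map<string, int32> field named
+// "counts"): iterate all entries, stopping early at the first negative value.
+// The callback's return value controls whether iteration continues.
+//
+//   err := m.TryForEachMapFieldEntryByName("counts", func(k, v interface{}) bool {
+//       return v.(int32) >= 0 // returning false breaks out of the iteration
+//   })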
+
+// PutMapField sets the value for the given map field descriptor and given key
+// to the given value. It panics if an error is encountered. See TryPutMapField.
+func (m *Message) PutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) {
+	if err := m.TryPutMapField(fd, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapField sets the value for the given map field descriptor and given
+// key to the given value. An error is returned if the given field descriptor
+// does not belong to the right message type, if the given field is not a map
+// field, or if the given value is not a correct/compatible type for the given
+// field.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a field with the same type as the map's value type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is cleared and replaced by the given known
+// value.
+func (m *Message) TryPutMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByName sets the value for the map field with the given name and
+// given key to the given value. It panics if an error is encountered. See
+// TryPutMapFieldByName.
+func (m *Message) PutMapFieldByName(name string, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByName(name, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByName sets the value for the map field with the given name and
+// the given key to the given value. An error is returned if the given name is
+// unknown, if it names a field that is not a map, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByName(name string, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+// PutMapFieldByNumber sets the value for the map field with the given tag
+// number and given key to the given value. It panics if an error is
+// encountered. See TryPutMapFieldByNumber.
+func (m *Message) PutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) {
+	if err := m.TryPutMapFieldByNumber(tagNumber, key, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryPutMapFieldByNumber sets the value for the map field with the given tag
+// number and the given key to the given value. An error is returned if the
+// given tag is unknown, if it indicates a field that is not a map, or if the
+// given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryPutMapFieldByNumber(tagNumber int, key interface{}, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.putMapField(fd, key, val)
+}
+
+func (m *Message) putMapField(fd *desc.FieldDescriptor, key interface{}, val interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return err
+	}
+	vfd := fd.GetMessageType().GetFields()[1]
+	vi, err := validElementFieldValue(vfd, val)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			m.internalSetField(fd, map[interface{}]interface{}{ki: vi})
+			return nil
+		}
+	}
+	mp.(map[interface{}]interface{})[ki] = vi
+	return nil
+}
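+
+// Illustrative usage sketch (hypothetical "labels" map field): writing and
+// deleting entries by name. Key and value must use the Go types that
+// TrySetField expects for the map's declared key and value types.
+//
+//   if err := m.TryPutMapFieldByName("labels", "env", "prod"); err != nil {
+//       // unknown name, not a map field, or incompatible key/value type
+//   }
+//   _ = m.TryRemoveMapFieldByName("labels", "env") // removing an absent key is a no-op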
+
+// RemoveMapField changes the value for the given field descriptor by removing
+// any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapField.
+func (m *Message) RemoveMapField(fd *desc.FieldDescriptor, key interface{}) {
+	if err := m.TryRemoveMapField(fd, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapField changes the value for the given field descriptor by
+// removing any value associated with the given key. An error is returned if the
+// given field descriptor does not belong to the right message type or if the
+// given field is not a map field.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and any value for the given key
+// removed.
+func (m *Message) TryRemoveMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByName changes the value for the field with the given name by
+// removing any value associated with the given key. It panics if an error is
+// encountered. See TryRemoveMapFieldByName.
+func (m *Message) RemoveMapFieldByName(name string, key interface{}) {
+	if err := m.TryRemoveMapFieldByName(name, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByName changes the value for the field with the given name
+// by removing any value associated with the given key. An error is returned if
+// the given name is unknown or if it names a field that is not a map.
+func (m *Message) TryRemoveMapFieldByName(name string, key interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.removeMapField(fd, key)
+}
+
+// RemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. It panics if an
+// error is encountered. See TryRemoveMapFieldByNumber.
+func (m *Message) RemoveMapFieldByNumber(tagNumber int, key interface{}) {
+	if err := m.TryRemoveMapFieldByNumber(tagNumber, key); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryRemoveMapFieldByNumber changes the value for the field with the given tag
+// number by removing any value associated with the given key. An error is
+// returned if the given tag is unknown or if it indicates a field that is not
+// a map.
+func (m *Message) TryRemoveMapFieldByNumber(tagNumber int, key interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.removeMapField(fd, key)
+}
+
+func (m *Message) removeMapField(fd *desc.FieldDescriptor, key interface{}) error {
+	if !fd.IsMap() {
+		return FieldIsNotMapError
+	}
+	kfd := fd.GetMessageType().GetFields()[0]
+	ki, err := validElementFieldValue(kfd, key)
+	if err != nil {
+		return err
+	}
+	mp := m.values[fd.GetNumber()]
+	if mp == nil {
+		if mp, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if mp == nil {
+			return nil
+		}
+	}
+	res := mp.(map[interface{}]interface{})
+	delete(res, ki)
+	if len(res) == 0 {
+		delete(m.values, fd.GetNumber())
+	}
+	return nil
+}
+
+// FieldLength returns the number of elements in this message for the given
+// field descriptor. It panics if an error is encountered. See TryFieldLength.
+func (m *Message) FieldLength(fd *desc.FieldDescriptor) int {
+	l, err := m.TryFieldLength(fd)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLength returns the number of elements in this message for the given
+// field descriptor. An error is returned if the given field descriptor does not
+// belong to the right message type or if it is neither a map field nor a
+// repeated field.
+func (m *Message) TryFieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if err := m.checkField(fd); err != nil {
+		return 0, err
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByName returns the number of elements in this message for the
+// field with the given name. It panics if an error is encountered. See
+// TryFieldLengthByName.
+func (m *Message) FieldLengthByName(name string) int {
+	l, err := m.TryFieldLengthByName(name)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByName returns the number of elements in this message for the
+// field with the given name. An error is returned if the given name is unknown
+// or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByName(name string) (int, error) {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return 0, UnknownFieldNameError
+	}
+	return m.fieldLength(fd)
+}
+
+// FieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. It panics if an error is encountered. See
+// TryFieldLengthByNumber.
+func (m *Message) FieldLengthByNumber(tagNumber int32) int {
+	l, err := m.TryFieldLengthByNumber(tagNumber)
+	if err != nil {
+		panic(err.Error())
+	}
+	return l
+}
+
+// TryFieldLengthByNumber returns the number of elements in this message for the
+// field with the given tag number. An error is returned if the given tag is
+// unknown or if the named field is neither a map field nor a repeated field.
+func (m *Message) TryFieldLengthByNumber(tagNumber int32) (int, error) {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return 0, UnknownTagNumberError
+	}
+	return m.fieldLength(fd)
+}
+
+func (m *Message) fieldLength(fd *desc.FieldDescriptor) (int, error) {
+	if !fd.IsRepeated() {
+		return 0, FieldIsNotRepeatedError
+	}
+	val := m.values[fd.GetNumber()]
+	if val == nil {
+		var err error
+		if val, err = m.parseUnknownField(fd); err != nil {
+			return 0, err
+		} else if val == nil {
+			return 0, nil
+		}
+	}
+	if sl, ok := val.([]interface{}); ok {
+		return len(sl), nil
+	} else if mp, ok := val.(map[interface{}]interface{}); ok {
+		return len(mp), nil
+	}
+	return 0, nil
+}
+
+// GetRepeatedField returns the value for the given repeated field descriptor at
+// the given index. It panics if an error is encountered. See
+// TryGetRepeatedField.
+func (m *Message) GetRepeatedField(fd *desc.FieldDescriptor, index int) interface{} {
+	if v, err := m.TryGetRepeatedField(fd, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedField returns the value for the given repeated field descriptor
+// at the given index. An error is returned if the given field descriptor does
+// not belong to the right message type, if it is not a repeated field, or if
+// the given index is out of range (less than zero or greater than or equal to
+// the length of the repeated field). Also, even though map fields technically
+// are repeated fields, if the given field is a map field an error will result:
+// map representation does not lend itself to random access by index.
+//
+// The Go type of the value returned mirrors the type that protoc would generate
+// for the field's element type. (See TryGetField for more details on types).
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) but corresponds to an unknown field, the unknown value will be
+// parsed and become known. The value at the given index in the parsed value
+// will be returned. An error will be returned if the unknown value cannot be
+// parsed according to the field descriptor's type information.
+func (m *Message) TryGetRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return nil, err
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. It panics if an error is encountered. See
+// TryGetRepeatedFieldByName.
+func (m *Message) GetRepeatedFieldByName(name string, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByName(name, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByName returns the value for the repeated field with the
+// given name at the given index. An error is returned if the given name is
+// unknown, if it names a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater
+// than or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByName(name string, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return nil, UnknownFieldNameError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+// GetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. It panics if an error is encountered.
+// See TryGetRepeatedFieldByNumber.
+func (m *Message) GetRepeatedFieldByNumber(tagNumber int, index int) interface{} {
+	if v, err := m.TryGetRepeatedFieldByNumber(tagNumber, index); err != nil {
+		panic(err.Error())
+	} else {
+		return v
+	}
+}
+
+// TryGetRepeatedFieldByNumber returns the value for the repeated field with the
+// given tag number at the given index. An error is returned if the given tag is
+// unknown, if it indicates a field that is not a repeated field (or is a map
+// field), or if the given index is out of range (less than zero or greater than
+// or equal to the length of the repeated field).
+//
+// (See TryGetField for more info on types.)
+func (m *Message) TryGetRepeatedFieldByNumber(tagNumber int, index int) (interface{}, error) {
+	if index < 0 {
+		return nil, IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return nil, UnknownTagNumberError
+	}
+	return m.getRepeatedField(fd, index)
+}
+
+func (m *Message) getRepeatedField(fd *desc.FieldDescriptor, index int) (interface{}, error) {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return nil, FieldIsNotRepeatedError
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		var err error
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return nil, err
+		} else if sl == nil {
+			return nil, IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return nil, IndexOutOfRangeError
+	}
+	return res[index], nil
+}
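+
+// Illustrative usage sketch (hypothetical repeated string field named "tags"):
+// length-checked element access.
+//
+//   n, err := m.TryFieldLengthByName("tags")
+//   if err == nil {
+//       for i := 0; i < n; i++ {
+//           v, _ := m.TryGetRepeatedFieldByName("tags", i)
+//           _ = v.(string)
+//       }
+//   }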
+
+// AddRepeatedField appends the given value to the given repeated field. It
+// panics if an error is encountered. See TryAddRepeatedField.
+func (m *Message) AddRepeatedField(fd *desc.FieldDescriptor, val interface{}) {
+	if err := m.TryAddRepeatedField(fd, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedField appends the given value to the given repeated field. An
+// error is returned if the given field descriptor does not belong to the right
+// message type, if the given field is not repeated, or if the given value is
+// not a correct/compatible type for the given field. If the given field is a
+// map field, the call will succeed if the given value is an instance of the
+// map's entry message type.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the given value is appended to
+// it.
+func (m *Message) TryAddRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByName appends the given value to the repeated field with the
+// given name. It panics if an error is encountered. See
+// TryAddRepeatedFieldByName.
+func (m *Message) AddRepeatedFieldByName(name string, val interface{}) {
+	if err := m.TryAddRepeatedFieldByName(name, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByName appends the given value to the repeated field with
+// the given name. An error is returned if the given name is unknown, if it
+// names a field that is not repeated, or if the given value has an incorrect
+// type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByName(name string, val interface{}) error {
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+// AddRepeatedFieldByNumber appends the given value to the repeated field with
+// the given tag number. It panics if an error is encountered. See
+// TryAddRepeatedFieldByNumber.
+func (m *Message) AddRepeatedFieldByNumber(tagNumber int, val interface{}) {
+	if err := m.TryAddRepeatedFieldByNumber(tagNumber, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TryAddRepeatedFieldByNumber appends the given value to the repeated field
+// with the given tag number. An error is returned if the given tag is unknown,
+// if it indicates a field that is not repeated, or if the given value has an
+// incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TryAddRepeatedFieldByNumber(tagNumber int, val interface{}) error {
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.addRepeatedField(fd, val)
+}
+
+func (m *Message) addRepeatedField(fd *desc.FieldDescriptor, val interface{}) error {
+	if !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val)
+	if err != nil {
+		return err
+	}
+
+	if fd.IsMap() {
+		// We're lenient. Just as we allow setting a map field to a slice of entry messages, we also allow
+		// adding entries one at a time (as if the field were a normal repeated field).
+		msg := val.(proto.Message)
+		dm, err := asDynamicMessage(msg, fd.GetMessageType(), m.mf)
+		if err != nil {
+			return err
+		}
+		k, err := dm.TryGetFieldByNumber(1)
+		if err != nil {
+			return err
+		}
+		v, err := dm.TryGetFieldByNumber(2)
+		if err != nil {
+			return err
+		}
+		return m.putMapField(fd, k, v)
+	}
+
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			sl = []interface{}{}
+		}
+	}
+	res := sl.([]interface{})
+	res = append(res, val)
+	m.internalSetField(fd, res)
+	return nil
+}
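+
+// Illustrative usage sketch (hypothetical repeated string field named "tags"):
+// appending elements one at a time by field name.
+//
+//   for _, t := range []string{"alpha", "beta"} {
+//       if err := m.TryAddRepeatedFieldByName("tags", t); err != nil {
+//           // unknown name, field is not repeated, or wrong element type
+//       }
+//   }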
+
+// SetRepeatedField sets the value for the given repeated field descriptor and
+// given index to the given value. It panics if an error is encountered. See
+// TrySetRepeatedField.
+func (m *Message) SetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) {
+	if err := m.TrySetRepeatedField(fd, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedField sets the value for the given repeated field descriptor
+// and given index to the given value. An error is returned if the given field
+// descriptor does not belong to the right message type, if the given field is
+// not repeated, or if the given value is not a correct/compatible type for the
+// given field. Also, even though map fields technically are repeated fields, if
+// the given field is a map field an error will result: map representation does
+// not lend itself to random access by index.
+//
+// The Go type expected for a field is the same as required by TrySetField for
+// a non-repeated field of the same type.
+//
+// If the given field descriptor is not known (e.g. not present in the message
+// descriptor) it will become known. Subsequent operations using tag numbers or
+// names will be able to resolve the newly-known type. If the message has a
+// value for the unknown field, it is parsed and the element at the given index
+// is replaced with the given value.
+func (m *Message) TrySetRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	if err := m.checkField(fd); err != nil {
+		return err
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByName sets the value for the repeated field with the given
+// name and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByName.
+func (m *Message) SetRepeatedFieldByName(name string, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByName(name, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByName sets the value for the repeated field with the
+// given name and the given index to the given value. An error is returned if
+// the given name is unknown, if it names a field that is not repeated (or is a
+// map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByName(name string, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptorByName(name)
+	if fd == nil {
+		return UnknownFieldNameError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+// SetRepeatedFieldByNumber sets the value for the repeated field with the given
+// tag number and given index to the given value. It panics if an error is
+// encountered. See TrySetRepeatedFieldByNumber.
+func (m *Message) SetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) {
+	if err := m.TrySetRepeatedFieldByNumber(tagNumber, index, val); err != nil {
+		panic(err.Error())
+	}
+}
+
+// TrySetRepeatedFieldByNumber sets the value for the repeated field with the
+// given tag number and the given index to the given value. An error is returned
+// if the given tag is unknown, if it indicates a field that is not repeated (or
+// is a map field), or if the given value has an incorrect type.
+//
+// (See TrySetField for more info on types.)
+func (m *Message) TrySetRepeatedFieldByNumber(tagNumber int, index int, val interface{}) error {
+	if index < 0 {
+		return IndexOutOfRangeError
+	}
+	fd := m.FindFieldDescriptor(int32(tagNumber))
+	if fd == nil {
+		return UnknownTagNumberError
+	}
+	return m.setRepeatedField(fd, index, val)
+}
+
+func (m *Message) setRepeatedField(fd *desc.FieldDescriptor, index int, val interface{}) error {
+	if fd.IsMap() || !fd.IsRepeated() {
+		return FieldIsNotRepeatedError
+	}
+	val, err := validElementFieldValue(fd, val)
+	if err != nil {
+		return err
+	}
+	sl := m.values[fd.GetNumber()]
+	if sl == nil {
+		if sl, err = m.parseUnknownField(fd); err != nil {
+			return err
+		} else if sl == nil {
+			return IndexOutOfRangeError
+		}
+	}
+	res := sl.([]interface{})
+	if index >= len(res) {
+		return IndexOutOfRangeError
+	}
+	res[index] = val
+	return nil
+}
+
+// GetUnknownField gets the value(s) for the given unknown tag number. If this
+// message has no unknown fields with the given tag, nil is returned.
+func (m *Message) GetUnknownField(tagNumber int32) []UnknownField {
+	if u, ok := m.unknownFields[tagNumber]; ok {
+		return u
+	} else {
+		return nil
+	}
+}
+
+func (m *Message) parseUnknownField(fd *desc.FieldDescriptor) (interface{}, error) {
+	unks, ok := m.unknownFields[fd.GetNumber()]
+	if !ok {
+		return nil, nil
+	}
+	var v interface{}
+	var sl []interface{}
+	var mp map[interface{}]interface{}
+	if fd.IsMap() {
+		mp = map[interface{}]interface{}{}
+	}
+	var err error
+	for _, unk := range unks {
+		var val interface{}
+		if unk.Encoding == proto.WireBytes || unk.Encoding == proto.WireStartGroup {
+			val, err = unmarshalLengthDelimitedField(fd, unk.Contents, m.mf)
+		} else {
+			val, err = unmarshalSimpleField(fd, unk.Value)
+		}
+		if err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			newEntry := val.(*Message)
+			kk, err := newEntry.TryGetFieldByNumber(1)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := newEntry.TryGetFieldByNumber(2)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+			v = mp
+		} else if fd.IsRepeated() {
+			t := reflect.TypeOf(val)
+			if t.Kind() == reflect.Slice && t != typeOfBytes {
+				// append slices if we unmarshalled a packed repeated field
+				newVals := val.([]interface{})
+				sl = append(sl, newVals...)
+			} else {
+				sl = append(sl, val)
+			}
+			v = sl
+		} else {
+			v = val
+		}
+	}
+	m.internalSetField(fd, v)
+	return v, nil
+}
+
+func validFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	return validFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+func validFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	if fd.IsMap() && val.Kind() == reflect.Map {
+		return validFieldValueForMapField(fd, val)
+	}
+
+	if fd.IsRepeated() { // this will also catch map fields where given value was not a map
+		if val.Kind() != reflect.Array && val.Kind() != reflect.Slice {
+			if fd.IsMap() {
+				return nil, fmt.Errorf("value for map field must be a map; instead was %v", val.Type())
+			} else {
+				return nil, fmt.Errorf("value for repeated field must be a slice; instead was %v", val.Type())
+			}
+		}
+
+		if fd.IsMap() {
+			// value should be a slice of entry messages that we need to convert into a map[interface{}]interface{}
+			m := map[interface{}]interface{}{}
+			for i := 0; i < val.Len(); i++ {
+				e, err := validElementFieldValue(fd, val.Index(i).Interface())
+				if err != nil {
+					return nil, err
+				}
+				msg := e.(proto.Message)
+				dm, err := asDynamicMessage(msg, fd.GetMessageType(), nil)
+				if err != nil {
+					return nil, err
+				}
+				k, err := dm.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				v, err := dm.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				m[k] = v
+			}
+			return m, nil
+		}
+
+		// make a defensive copy while checking contents (also converts to []interface{})
+		s := make([]interface{}, val.Len())
+		for i := 0; i < val.Len(); i++ {
+			ev := val.Index(i)
+			if ev.Kind() == reflect.Interface {
+				// unwrap it
+				ev = reflect.ValueOf(ev.Interface())
+			}
+			e, err := validElementFieldValueForRv(fd, ev)
+			if err != nil {
+				return nil, err
+			}
+			s[i] = e
+		}
+
+		return s, nil
+	}
+
+	return validElementFieldValueForRv(fd, val)
+}
+
+func asDynamicMessage(m proto.Message, md *desc.MessageDescriptor, mf *MessageFactory) (*Message, error) {
+	if dm, ok := m.(*Message); ok {
+		return dm, nil
+	}
+	dm := NewMessageWithMessageFactory(md, mf)
+	if err := dm.mergeFrom(m); err != nil {
+		return nil, err
+	}
+	return dm, nil
+}
+
+func validElementFieldValue(fd *desc.FieldDescriptor, val interface{}) (interface{}, error) {
+	return validElementFieldValueForRv(fd, reflect.ValueOf(val))
+}
+
+func validElementFieldValueForRv(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	t := fd.GetType()
+	if !val.IsValid() {
+		return nil, typeError(fd, nil)
+	}
+
+	switch t {
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM:
+		return toInt32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return toInt64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_UINT32:
+		return toUint32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64:
+		return toUint64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		return toFloat32(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return toFloat64(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return toBool(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return toBytes(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return toString(reflect.Indirect(val), fd)
+
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+		m, err := asMessage(val, fd.GetFullyQualifiedName())
+		// check that message is correct type
+		if err != nil {
+			return nil, err
+		}
+		var msgType string
+		if dm, ok := m.(*Message); ok {
+			msgType = dm.GetMessageDescriptor().GetFullyQualifiedName()
+		} else {
+			msgType = proto.MessageName(m)
+		}
+		if msgType != fd.GetMessageType().GetFullyQualifiedName() {
+			return nil, fmt.Errorf("message field %s requires value of type %s; received %s", fd.GetFullyQualifiedName(), fd.GetMessageType().GetFullyQualifiedName(), msgType)
+		}
+		return m, nil
+
+	default:
+		return nil, fmt.Errorf("unable to handle unrecognized field type: %v", fd.GetType())
+	}
+}
+
+func toInt32(v reflect.Value, fd *desc.FieldDescriptor) (int32, error) {
+	if v.Kind() == reflect.Int32 {
+		return int32(v.Int()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint32(v reflect.Value, fd *desc.FieldDescriptor) (uint32, error) {
+	if v.Kind() == reflect.Uint32 {
+		return uint32(v.Uint()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat32(v reflect.Value, fd *desc.FieldDescriptor) (float32, error) {
+	if v.Kind() == reflect.Float32 {
+		return float32(v.Float()), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toInt64(v reflect.Value, fd *desc.FieldDescriptor) (int64, error) {
+	if v.Kind() == reflect.Int64 || v.Kind() == reflect.Int || v.Kind() == reflect.Int32 {
+		return v.Int(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toUint64(v reflect.Value, fd *desc.FieldDescriptor) (uint64, error) {
+	if v.Kind() == reflect.Uint64 || v.Kind() == reflect.Uint || v.Kind() == reflect.Uint32 {
+		return v.Uint(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toFloat64(v reflect.Value, fd *desc.FieldDescriptor) (float64, error) {
+	if v.Kind() == reflect.Float64 || v.Kind() == reflect.Float32 {
+		return v.Float(), nil
+	}
+	return 0, typeError(fd, v.Type())
+}
+
+func toBool(v reflect.Value, fd *desc.FieldDescriptor) (bool, error) {
+	if v.Kind() == reflect.Bool {
+		return v.Bool(), nil
+	}
+	return false, typeError(fd, v.Type())
+}
+
+func toBytes(v reflect.Value, fd *desc.FieldDescriptor) ([]byte, error) {
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 {
+		return v.Bytes(), nil
+	}
+	return nil, typeError(fd, v.Type())
+}
+
+func toString(v reflect.Value, fd *desc.FieldDescriptor) (string, error) {
+	if v.Kind() == reflect.String {
+		return v.String(), nil
+	}
+	return "", typeError(fd, v.Type())
+}
+
+func typeError(fd *desc.FieldDescriptor, t reflect.Type) error {
+	return fmt.Errorf(
+		"%s field %s is not compatible with value of type %v",
+		getTypeString(fd), fd.GetFullyQualifiedName(), t)
+}
+
+func getTypeString(fd *desc.FieldDescriptor) string {
+	return strings.ToLower(fd.GetType().String())
+}
+
+func asMessage(v reflect.Value, fieldName string) (proto.Message, error) {
+	t := v.Type()
+	// we need a pointer to a struct that implements proto.Message
+	if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct || !t.Implements(typeOfProtoMessage) {
+		return nil, fmt.Errorf("message field %s is not compatible with value of type %v", fieldName, v.Type())
+	}
+	return v.Interface().(proto.Message), nil
+}
+
+// Reset resets this message to an empty message. It removes all values set in
+// the message.
+func (m *Message) Reset() {
+	for k := range m.values {
+		delete(m.values, k)
+	}
+	for k := range m.unknownFields {
+		delete(m.unknownFields, k)
+	}
+}
+
+// String returns this message rendered in compact text format.
+func (m *Message) String() string {
+	b, err := m.MarshalText()
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create string representation of message: %s", err.Error()))
+	}
+	return string(b)
+}
+
+// ProtoMessage is present to satisfy the proto.Message interface.
+func (m *Message) ProtoMessage() {
+}
+
+// ConvertTo converts this dynamic message into the given message. This is
+// shorthand for resetting then merging:
+//   target.Reset()
+//   m.MergeInto(target)
+func (m *Message) ConvertTo(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	target.Reset()
+	return m.mergeInto(target)
+}
+
+// ConvertFrom converts the given message into this dynamic message. This is
+// shorthand for resetting then merging:
+//   m.Reset()
+//   m.MergeFrom(target)
+func (m *Message) ConvertFrom(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+
+	m.Reset()
+	return m.mergeFrom(target)
+}
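+
+// Illustrative usage sketch: round-tripping between a dynamic message and a
+// generated message type. The generated type pb.Widget is hypothetical; both
+// sides must share the same fully-qualified message name.
+//
+//   var w pb.Widget
+//   if err := m.ConvertTo(&w); err != nil {
+//       // target is a different message type
+//   }
+//   if err := m.ConvertFrom(&w); err != nil {
+//       // source is a different message type
+//   }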
+
+// MergeInto merges this dynamic message into the given message. All field
+// values in this message will be set on the given message. For map fields,
+// entries are added to the given message (if the given message has existing
+// values for like keys, they are overwritten). For slice fields, elements are
+// added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in this message to be represented as unknown fields in the
+// given message after merging, and vice versa.
+func (m *Message) MergeInto(target proto.Message) error {
+	if err := m.checkType(target); err != nil {
+		return err
+	}
+	return m.mergeInto(target)
+}
+
+// MergeFrom merges the given message into this dynamic message. All field
+// values in the given message will be set on this message. For map fields,
+// entries are added to this message (if this message has existing values for
+// like keys, they are overwritten). For slice fields, elements are added.
+//
+// If the given message has a different set of known fields, it is possible for
+// some known fields in that message to be represented as unknown fields in this
+// message after merging, and vice versa.
+func (m *Message) MergeFrom(source proto.Message) error {
+	if err := m.checkType(source); err != nil {
+		return err
+	}
+	return m.mergeFrom(source)
+}
+
+// Merge implements the proto.Merger interface so that dynamic messages are
+// compatible with the proto.Merge function. It delegates to MergeFrom but will
+// panic on error as the proto.Merger interface doesn't allow for returning an
+// error.
+//
+// Unlike nearly all other methods, this method can work if this message's type
+// is not defined (such as instantiating the message without using NewMessage).
+// This is strictly so that dynamic messages are compatible with the
+// proto.Clone function, which instantiates a new message via reflection (thus
+// its message descriptor will not be set) and then calls Merge.
+func (m *Message) Merge(source proto.Message) {
+	if m.md == nil {
+		// To support proto.Clone, initialize the descriptor from the source.
+		if dm, ok := source.(*Message); ok {
+			m.md = dm.md
+			// also make sure the clone uses the same message factory and
+			// extensions and also knows about the same extra fields (if any)
+			m.mf = dm.mf
+			m.er = dm.er
+			m.extraFields = dm.extraFields
+		} else if md, err := desc.LoadMessageDescriptorForMessage(source); err != nil {
+			panic(err.Error())
+		} else {
+			m.md = md
+		}
+	}
+
+	if err := m.MergeFrom(source); err != nil {
+		panic(err.Error())
+	}
+}
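+
+// Illustrative usage sketch: because Merge satisfies the proto.Merger
+// interface, dynamic messages can be used with the standard helpers from
+// github.com/golang/protobuf/proto:
+//
+//   clone := proto.Clone(m).(*Message) // reflection-created copy; Merge fills in the descriptor
+//   proto.Merge(clone, m)              // delegates to MergeFrom and panics on error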
+
+func (m *Message) checkType(target proto.Message) error {
+	if dm, ok := target.(*Message); ok {
+		if dm.md.GetFullyQualifiedName() != m.md.GetFullyQualifiedName() {
+			return fmt.Errorf("given message has wrong type: %q; expecting %q", dm.md.GetFullyQualifiedName(), m.md.GetFullyQualifiedName())
+		}
+		return nil
+	}
+
+	msgName := proto.MessageName(target)
+	if msgName != m.md.GetFullyQualifiedName() {
+		return fmt.Errorf("given message has wrong type: %q; expecting %q", msgName, m.md.GetFullyQualifiedName())
+	}
+	return nil
+}
+
+func (m *Message) mergeInto(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		return dm.mergeFrom(m)
+	}
+
+	target := reflect.ValueOf(pm)
+	if target.Kind() == reflect.Ptr {
+		target = target.Elem()
+	}
+
+	// track tags for which the dynamic message has data but the given
+	// message doesn't know about it
+	u := target.FieldByName("XXX_unrecognized")
+	var unknownTags map[int32]struct{}
+	if u.IsValid() && u.Type() == typeOfBytes {
+		unknownTags = map[int32]struct{}{}
+		for tag := range m.values {
+			unknownTags[tag] = struct{}{}
+		}
+	}
+
+	// check that we can successfully do the merge
+	structProps := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	for _, prop := range structProps.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		f := target.FieldByName(prop.Name)
+		ft := f.Type()
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// check one-of fields
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		stf, ok := oop.Type.Elem().FieldByName(prop.Name)
+		if !ok {
+			return fmt.Errorf("one-of field indicates struct field name %s, but type %v has no such field", prop.Name, oop.Type.Elem())
+		}
+		ft := stf.Type
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+	// and check extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		if unknownTags != nil {
+			delete(unknownTags, tag)
+		}
+		ft := reflect.TypeOf(ext.ExtensionType)
+		val := reflect.ValueOf(v)
+		if !canConvert(val, ft) {
+			return fmt.Errorf("cannot convert %v to %v", val.Type(), ft)
+		}
+	}
+
+	// now actually perform the merge
+	for _, prop := range structProps.Prop {
+		v, ok := m.values[int32(prop.Tag)]
+		if !ok {
+			continue
+		}
+		f := target.FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+			return err
+		}
+	}
+	// merge one-ofs
+	for _, oop := range structProps.OneofTypes {
+		prop := oop.Prop
+		tag := int32(prop.Tag)
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		oov := reflect.New(oop.Type.Elem())
+		f := oov.Elem().FieldByName(prop.Name)
+		if err := mergeVal(reflect.ValueOf(v), f); err != nil {
+			return err
+		}
+		target.Field(oop.Field).Set(oov)
+	}
+	// merge extensions, too
+	for tag, ext := range proto.RegisteredExtensions(pm) {
+		v, ok := m.values[tag]
+		if !ok {
+			continue
+		}
+		e := reflect.New(reflect.TypeOf(ext.ExtensionType)).Elem()
+		if err := mergeVal(reflect.ValueOf(v), e); err != nil {
+			return err
+		}
+		if err := proto.SetExtension(pm, ext, e.Interface()); err != nil {
+			// shouldn't happen since we already checked that the extension type was compatible above
+			return err
+		}
+	}
+
+	// if we have fields that the given message doesn't know about, add to its unknown fields
+	if len(unknownTags) > 0 {
+		ub := u.Interface().([]byte)
+		var b codedBuffer
+		for tag := range unknownTags {
+			fd := m.FindFieldDescriptor(tag)
+			if err := marshalField(tag, fd, m.values[tag], &b, false); err != nil {
+				return err
+			}
+		}
+		ub = append(ub, b.buf...)
+		u.Set(reflect.ValueOf(ub))
+	}
+
+	// finally, convey unknown fields into the given message by letting it unmarshal them
+	// (this will append to its unknown fields if not known; if somehow the given message recognizes
+	// a field even though the dynamic message did not, it will get correctly unmarshalled)
+	if unknownTags != nil && len(m.unknownFields) > 0 {
+		var b codedBuffer
+		m.marshalUnknownFields(&b)
+		proto.UnmarshalMerge(b.buf, pm)
+	}
+
+	return nil
+}
+
+func canConvert(src reflect.Value, target reflect.Type) bool {
+	if src.Kind() == reflect.Interface {
+		src = reflect.ValueOf(src.Interface())
+	}
+	srcType := src.Type()
+	// we allow convertible types instead of requiring exact types so that calling
+	// code can, for example, assign an enum constant to an enum field. In that case,
+	// one type is the enum type (a sub-type of int32) and the other may be the int32
+	// type. So we automatically do the conversion in that case.
+	if srcType.ConvertibleTo(target) {
+		return true
+	} else if target.Kind() == reflect.Ptr && srcType.ConvertibleTo(target.Elem()) {
+		return true
+	} else if target.Kind() == reflect.Slice {
+		if srcType.Kind() != reflect.Slice {
+			return false
+		}
+		et := target.Elem()
+		for i := 0; i < src.Len(); i++ {
+			if !canConvert(src.Index(i), et) {
+				return false
+			}
+		}
+		return true
+	} else if target.Kind() == reflect.Map {
+		if srcType.Kind() != reflect.Map {
+			return false
+		}
+		return canConvertMap(src, target)
+	} else if srcType == typeOfDynamicMessage && target.Implements(typeOfProtoMessage) {
+		z := reflect.Zero(target).Interface()
+		msgType := proto.MessageName(z.(proto.Message))
+		return msgType == src.Interface().(*Message).GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		return false
+	}
+}
+
+func mergeVal(src, target reflect.Value) error {
+	if src.Kind() == reflect.Interface && !src.IsNil() {
+		src = src.Elem()
+	}
+	srcType := src.Type()
+	targetType := target.Type()
+	if srcType.ConvertibleTo(targetType) {
+		if targetType.Implements(typeOfProtoMessage) && !target.IsNil() {
+			Merge(target.Interface().(proto.Message), src.Convert(targetType).Interface().(proto.Message))
+		} else {
+			target.Set(src.Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Ptr && srcType.ConvertibleTo(targetType.Elem()) {
+		if !src.CanAddr() {
+			target.Set(reflect.New(targetType.Elem()))
+			target.Elem().Set(src.Convert(targetType.Elem()))
+		} else {
+			target.Set(src.Addr().Convert(targetType))
+		}
+	} else if targetType.Kind() == reflect.Slice {
+		l := target.Len()
+		newL := l + src.Len()
+		if target.Cap() < newL {
+			// expand capacity of the slice and copy
+			newSl := reflect.MakeSlice(targetType, newL, newL)
+			for i := 0; i < target.Len(); i++ {
+				newSl.Index(i).Set(target.Index(i))
+			}
+			target.Set(newSl)
+		} else {
+			target.SetLen(newL)
+		}
+		for i := 0; i < src.Len(); i++ {
+			dest := target.Index(l + i)
+			if dest.Kind() == reflect.Ptr {
+				dest.Set(reflect.New(dest.Type().Elem()))
+			}
+			if err := mergeVal(src.Index(i), dest); err != nil {
+				return err
+			}
+		}
+	} else if targetType.Kind() == reflect.Map {
+		return mergeMapVal(src, target, targetType)
+	} else if srcType == typeOfDynamicMessage && targetType.Implements(typeOfProtoMessage) {
+		dm := src.Interface().(*Message)
+		if target.IsNil() {
+			target.Set(reflect.New(targetType.Elem()))
+		}
+		m := target.Interface().(proto.Message)
+		if err := dm.mergeInto(m); err != nil {
+			return err
+		}
+	} else {
+		return fmt.Errorf("cannot convert %v to %v", srcType, targetType)
+	}
+	return nil
+}
+
+func (m *Message) mergeFrom(pm proto.Message) error {
+	if dm, ok := pm.(*Message); ok {
+		// if given message is also a dynamic message, we merge differently
+		for tag, v := range dm.values {
+			fd := m.FindFieldDescriptor(tag)
+			if fd == nil {
+				fd = dm.FindFieldDescriptor(tag)
+			}
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	pmrv := reflect.ValueOf(pm)
+	if pmrv.IsNil() {
+		// nil is an empty message, so nothing to do
+		return nil
+	}
+
+	// check that we can successfully do the merge
+	src := pmrv.Elem()
+	values := map[*desc.FieldDescriptor]interface{}{}
+	props := proto.GetProperties(reflect.TypeOf(pm).Elem())
+	if props == nil {
+		return fmt.Errorf("could not determine message properties to merge for %v", reflect.TypeOf(pm).Elem())
+	}
+
+	// regular fields
+	for _, prop := range props.Prop {
+		if prop.Tag == 0 {
+			continue // one-of or special field (such as XXX_unrecognized, etc.)
+		}
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name)
+			}
+		}
+		rv := src.FieldByName(prop.Name)
+		if (rv.Kind() == reflect.Ptr || rv.Kind() == reflect.Slice) && rv.IsNil() {
+			continue
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// one-of fields
+	for _, oop := range props.OneofTypes {
+		oov := src.Field(oop.Field).Elem()
+		if !oov.IsValid() || oov.Type() != oop.Type {
+			// this field is unset (in other words, one-of message field is not currently set to this option)
+			continue
+		}
+		prop := oop.Prop
+		rv := oov.Elem().FieldByName(prop.Name)
+		fd := m.FindFieldDescriptor(int32(prop.Tag))
+		if fd == nil {
+			// Our descriptor has different fields than this message object. So
+			// try to reflect on the message object's fields.
+			md, err := desc.LoadMessageDescriptorForMessage(pm)
+			if err != nil {
+				return err
+			}
+			fd = md.FindFieldByNumber(int32(prop.Tag))
+			if fd == nil {
+				return fmt.Errorf("message descriptor %q did not contain field for tag %d (%q in one-of %q)", md.GetFullyQualifiedName(), prop.Tag, prop.Name, src.Type().Field(oop.Field).Name)
+			}
+		}
+		if v, err := validFieldValueForRv(fd, rv); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// extension fields
+	rexts, _ := proto.ExtensionDescs(pm)
+	var unknownExtensions []byte
+	for _, ed := range rexts {
+		v, _ := proto.GetExtension(pm, ed)
+		if v == nil {
+			continue
+		}
+		if ed.ExtensionType == nil {
+			extBytes, _ := v.([]byte)
+			if len(extBytes) > 0 {
+				unknownExtensions = append(unknownExtensions, extBytes...)
+			}
+			continue
+		}
+		fd := m.er.FindExtension(m.md.GetFullyQualifiedName(), ed.Field)
+		if fd == nil {
+			var err error
+			if fd, err = desc.LoadFieldDescriptorForExtension(ed); err != nil {
+				return err
+			}
+		}
+		if v, err := validFieldValue(fd, v); err != nil {
+			return err
+		} else {
+			values[fd] = v
+		}
+	}
+
+	// now actually perform the merge
+	for fd, v := range values {
+		mergeField(m, fd, v)
+	}
+
+	u := src.FieldByName("XXX_unrecognized")
+	if u.IsValid() && u.Type() == typeOfBytes {
+		// ignore any error returned: pulling in unknown fields is best-effort
+		m.UnmarshalMerge(u.Interface().([]byte))
+	}
+
+	// lastly, also extract any unknown extensions the message may have (unknown extensions
+	// are stored with other extensions, not in the XXX_unrecognized field, so we have to do
+	// more than just the step above...)
+	if len(unknownExtensions) > 0 {
+		// pulling in unknown fields is best-effort, so we just ignore errors
+		m.UnmarshalMerge(unknownExtensions)
+	}
+	return nil
+}
+
+// Validate checks that all required fields are present. It returns an error if any are absent.
+func (m *Message) Validate() error {
+	missingFields := m.findMissingFields()
+	if len(missingFields) == 0 {
+		return nil
+	}
+	return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+}
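+
+// Illustrative usage sketch: Validate only inspects this message's own
+// required fields; ValidateRecursive (below) also descends into message-typed
+// values and reports missing fields with a path-like prefix.
+//
+//   if err := m.Validate(); err != nil {
+//       // e.g. "some required fields missing: id, name"
+//   }
+//   if err := m.ValidateRecursive(); err != nil {
+//       // e.g. "some required fields missing: widget.id"
+//   }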
+
+func (m *Message) findMissingFields() []string {
+	if m.md.IsProto3() {
+		// proto3 does not allow required fields
+		return nil
+	}
+	var missingFields []string
+	for _, fd := range m.md.GetFields() {
+		if fd.IsRequired() {
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				missingFields = append(missingFields, fd.GetName())
+			}
+		}
+	}
+	return missingFields
+}
+
+// ValidateRecursive checks that all required fields are present and also
+// recursively validates all fields that are themselves messages. It returns an error
+// if any required fields, in this message or nested within, are absent.
+func (m *Message) ValidateRecursive() error {
+	return m.validateRecursive("")
+}
+
+func (m *Message) validateRecursive(prefix string) error {
+	if missingFields := m.findMissingFields(); len(missingFields) > 0 {
+		for i := range missingFields {
+			missingFields[i] = fmt.Sprintf("%s%s", prefix, missingFields[i])
+		}
+		return fmt.Errorf("some required fields missing: %v", strings.Join(missingFields, ", "))
+	}
+
+	for tag, fld := range m.values {
+		fd := m.FindFieldDescriptor(tag)
+		var chprefix string
+		var md *desc.MessageDescriptor
+		checkMsg := func(pm proto.Message) error {
+			var dm *Message
+			if d, ok := pm.(*Message); ok {
+				dm = d
+			} else {
+				dm = m.mf.NewDynamicMessage(md)
+				if err := dm.ConvertFrom(pm); err != nil {
+					return err
+				}
+			}
+			if err := dm.validateRecursive(chprefix); err != nil {
+				return err
+			}
+			return nil
+		}
+		isMap := fd.IsMap()
+		if isMap && fd.GetMapValueType().GetMessageType() != nil {
+			md = fd.GetMapValueType().GetMessageType()
+			mp := fld.(map[interface{}]interface{})
+			for k, v := range mp {
+				chprefix = fmt.Sprintf("%s%s[%v].", prefix, getName(fd), k)
+				if err := checkMsg(v.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		} else if !isMap && fd.GetMessageType() != nil {
+			md = fd.GetMessageType()
+			if fd.IsRepeated() {
+				sl := fld.([]interface{})
+				for i, v := range sl {
+					chprefix = fmt.Sprintf("%s%s[%d].", prefix, getName(fd), i)
+					if err := checkMsg(v.(proto.Message)); err != nil {
+						return err
+					}
+				}
+			} else {
+				chprefix = fmt.Sprintf("%s%s.", prefix, getName(fd))
+				if err := checkMsg(fld.(proto.Message)); err != nil {
+					return err
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+func getName(fd *desc.FieldDescriptor) string {
+	if fd.IsExtension() {
+		return fmt.Sprintf("(%s)", fd.GetFullyQualifiedName())
+	} else {
+		return fd.GetName()
+	}
+}
+
+// knownFieldTags returns the tags of present and recognized fields, in sorted order.
+func (m *Message) knownFieldTags() []int {
+	if len(m.values) == 0 {
+		return []int(nil)
+	}
+
+	keys := make([]int, len(m.values))
+	i := 0
+	for k := range m.values {
+		keys[i] = int(k)
+		i++
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// allKnownFieldTags returns the tags of all recognized fields, including known
+// fields that are not currently set, in sorted order. Extensions are only
+// included if they are present; known but absent extensions are omitted from
+// the returned set of tags.
+func (m *Message) allKnownFieldTags() []int {
+	fds := m.md.GetFields()
+	keys := make([]int, 0, len(fds)+len(m.extraFields))
+
+	for k := range m.values {
+		keys = append(keys, int(k))
+	}
+
+	// also include known fields that are not present
+	for _, fd := range fds {
+		if _, ok := m.values[fd.GetNumber()]; !ok {
+			keys = append(keys, int(fd.GetNumber()))
+		}
+	}
+	for _, fd := range m.extraFields {
+		if !fd.IsExtension() { // skip extensions that are not present
+			if _, ok := m.values[fd.GetNumber()]; !ok {
+				keys = append(keys, int(fd.GetNumber()))
+			}
+		}
+	}
+
+	sort.Ints(keys)
+	return keys
+}
+
+// unknownFieldTags returns tags of present but unrecognized fields, in sorted order.
+func (m *Message) unknownFieldTags() []int {
+	if len(m.unknownFields) == 0 {
+		return []int(nil)
+	}
+	keys := make([]int, len(m.unknownFields))
+	i := 0
+	for k := range m.unknownFields {
+		keys[i] = int(k)
+		i++
+	}
+	sort.Ints(keys)
+	return keys
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/equal.go b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
new file mode 100644
index 0000000..5fbcc24
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/equal.go
@@ -0,0 +1,152 @@
+package dynamic
+
+import (
+	"bytes"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Equal returns true if the given two dynamic messages are equal. Two messages are equal when they
+// have the same message type and same fields set to equal values. For proto3 messages, fields set
+// to their zero value are considered unset.
+func Equal(a, b *Message) bool {
+	if a.md.GetFullyQualifiedName() != b.md.GetFullyQualifiedName() {
+		return false
+	}
+	if len(a.values) != len(b.values) {
+		return false
+	}
+	if len(a.unknownFields) != len(b.unknownFields) {
+		return false
+	}
+	for tag, aval := range a.values {
+		bval, ok := b.values[tag]
+		if !ok {
+			return false
+		}
+		if !fieldsEqual(aval, bval) {
+			return false
+		}
+	}
+	for tag, au := range a.unknownFields {
+		bu, ok := b.unknownFields[tag]
+		if !ok {
+			return false
+		}
+		if len(au) != len(bu) {
+			return false
+		}
+		for i, aval := range au {
+			bval := bu[i]
+			if aval.Encoding != bval.Encoding {
+				return false
+			}
+			if aval.Encoding == proto.WireBytes || aval.Encoding == proto.WireStartGroup {
+				if !bytes.Equal(aval.Contents, bval.Contents) {
+					return false
+				}
+			} else if aval.Value != bval.Value {
+				return false
+			}
+		}
+	}
+	// all checks pass!
+	return true
+}
+
+func fieldsEqual(aval, bval interface{}) bool {
+	arv := reflect.ValueOf(aval)
+	brv := reflect.ValueOf(bval)
+	if arv.Type() != brv.Type() {
+		// it is possible that one is a dynamic message and one is not
+		apm, ok := aval.(proto.Message)
+		if !ok {
+			return false
+		}
+		bpm, ok := bval.(proto.Message)
+		if !ok {
+			return false
+		}
+		return MessagesEqual(apm, bpm)
+
+	} else {
+		switch arv.Kind() {
+		case reflect.Ptr:
+			apm, ok := aval.(proto.Message)
+			if !ok {
+				// Don't know how to compare pointer values that aren't messages!
+				// Maybe this should panic?
+				return false
+			}
+			bpm := bval.(proto.Message) // we know it will succeed because we know a and b have same type
+			return MessagesEqual(apm, bpm)
+
+		case reflect.Map:
+			return mapsEqual(arv, brv)
+
+		case reflect.Slice:
+			if arv.Type() == typeOfBytes {
+				return bytes.Equal(aval.([]byte), bval.([]byte))
+			} else {
+				return slicesEqual(arv, brv)
+			}
+
+		default:
+			return aval == bval
+		}
+	}
+}
+
+func slicesEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	for i := 0; i < a.Len(); i++ {
+		ai := a.Index(i)
+		bi := b.Index(i)
+		if !fieldsEqual(ai.Interface(), bi.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+// MessagesEqual returns true if the given two messages are equal. Use this instead of proto.Equal
+// when one or both of the messages might be a dynamic message.
+func MessagesEqual(a, b proto.Message) bool {
+	da, aok := a.(*Message)
+	db, bok := b.(*Message)
+	// Both dynamic messages
+	if aok && bok {
+		return Equal(da, db)
+	}
+	// Neither dynamic messages
+	if !aok && !bok {
+		return proto.Equal(a, b)
+	}
+	// Mixed
+	if aok {
+		md, err := desc.LoadMessageDescriptorForMessage(b)
+		if err != nil {
+			return false
+		}
+		db = NewMessageWithMessageFactory(md, da.mf)
+		if db.ConvertFrom(b) != nil {
+			return false
+		}
+		return Equal(da, db)
+	} else {
+		md, err := desc.LoadMessageDescriptorForMessage(a)
+		if err != nil {
+			return false
+		}
+		da = NewMessageWithMessageFactory(md, db.mf)
+		if da.ConvertFrom(a) != nil {
+			return false
+		}
+		return Equal(da, db)
+	}
+}
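+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): comparing a dynamic message against a generated message of the
+// same type. Here "generated" stands in for any statically generated
+// proto.Message value and md for its descriptor.
+//
+//    dm := NewMessage(md)
+//    if err := dm.ConvertFrom(generated); err == nil {
+//        same := MessagesEqual(dm, generated) // true when all fields match
+//        _ = same
+//    }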
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension.go b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
new file mode 100644
index 0000000..a0ff6af
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension.go
@@ -0,0 +1,44 @@
+package dynamic
+
+import (
+	"fmt"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// SetExtension sets the given extension value. If the given message is not a
+// dynamic message, the given extension may not be recognized (or may differ
+// from the compiled-and-linked-in version of the extension). In that case,
+// this function will serialize the given value to bytes and then use
+// proto.SetRawExtension to set the value.
+func SetExtension(msg proto.Message, extd *desc.FieldDescriptor, val interface{}) error {
+	if !extd.IsExtension() {
+		return fmt.Errorf("given field %s is not an extension", extd.GetFullyQualifiedName())
+	}
+
+	if dm, ok := msg.(*Message); ok {
+		return dm.TrySetField(extd, val)
+	}
+
+	md, err := desc.LoadMessageDescriptorForMessage(msg)
+	if err != nil {
+		return err
+	}
+	if err := checkField(extd, md); err != nil {
+		return err
+	}
+
+	val, err = validFieldValue(extd, val)
+	if err != nil {
+		return err
+	}
+
+	var b codedBuffer
+	if err := marshalField(extd.GetNumber(), extd, val, &b, defaultDeterminism); err != nil {
+		return err
+	}
+	proto.SetRawExtension(msg, extd.GetNumber(), b.buf)
+	return nil
+}
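+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): setting an extension on a message that may or may not be dynamic.
+// Here extd is assumed to be a *desc.FieldDescriptor for an int32 extension
+// field that extends msg's message type.
+//
+//    if err := SetExtension(msg, extd, int32(42)); err != nil {
+//        // the value did not match the extension's declared type, or the
+//        // extension does not extend msg's message type
+//    }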
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
new file mode 100644
index 0000000..6876827
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/extension_registry.go
@@ -0,0 +1,241 @@
+package dynamic
+
+import (
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// ExtensionRegistry is a registry of known extension fields. This is used to parse
+// extension fields encountered when de-serializing a dynamic message.
+type ExtensionRegistry struct {
+	includeDefault bool
+	mu             sync.RWMutex
+	exts           map[string]map[int32]*desc.FieldDescriptor
+}
+
+// NewExtensionRegistryWithDefaults returns a registry that includes all "default" extensions,
+// which are those that are statically linked into the current program (e.g. registered by
+// protoc-generated code via proto.RegisterExtension). Extensions explicitly added to the
+// registry will override any default extensions that are for the same extendee and have the
+// same tag number and/or name.
+func NewExtensionRegistryWithDefaults() *ExtensionRegistry {
+	return &ExtensionRegistry{includeDefault: true}
+}
+
+// AddExtensionDesc adds the given extensions to the registry.
+func (r *ExtensionRegistry) AddExtensionDesc(exts ...*proto.ExtensionDesc) error {
+	flds := make([]*desc.FieldDescriptor, len(exts))
+	for i, ext := range exts {
+		fd, err := desc.LoadFieldDescriptorForExtension(ext)
+		if err != nil {
+			return err
+		}
+		flds[i] = fd
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, fd := range flds {
+		r.putExtensionLocked(fd)
+	}
+	return nil
+}
+
+// AddExtension adds the given extensions to the registry. The given extensions
+// will overwrite any previously added extensions that are for the same extendee
+// message and same extension tag number.
+func (r *ExtensionRegistry) AddExtension(exts ...*desc.FieldDescriptor) error {
+	for _, ext := range exts {
+		if !ext.IsExtension() {
+			return fmt.Errorf("given field is not an extension: %s", ext.GetFullyQualifiedName())
+		}
+	}
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range exts {
+		r.putExtensionLocked(ext)
+	}
+	return nil
+}
+
+// AddExtensionsFromFile adds to the registry all extension fields defined in the given file descriptor.
+func (r *ExtensionRegistry) AddExtensionsFromFile(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.addExtensionsFromFileLocked(fd, false, nil)
+}
+
+// AddExtensionsFromFileRecursively adds to the registry all extension fields defined in the given file
+// descriptor and also recursively adds all extensions defined in that file's dependencies. This adds
+// extensions from the entire transitive closure for the given file.
+func (r *ExtensionRegistry) AddExtensionsFromFileRecursively(fd *desc.FileDescriptor) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	already := map[*desc.FileDescriptor]struct{}{}
+	r.addExtensionsFromFileLocked(fd, true, already)
+}
+
+func (r *ExtensionRegistry) addExtensionsFromFileLocked(fd *desc.FileDescriptor, recursive bool, alreadySeen map[*desc.FileDescriptor]struct{}) {
+	if _, ok := alreadySeen[fd]; ok {
+		return
+	}
+
+	if r.exts == nil {
+		r.exts = map[string]map[int32]*desc.FieldDescriptor{}
+	}
+	for _, ext := range fd.GetExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range fd.GetMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+
+	if recursive {
+		alreadySeen[fd] = struct{}{}
+		for _, dep := range fd.GetDependencies() {
+			r.addExtensionsFromFileLocked(dep, recursive, alreadySeen)
+		}
+	}
+}
+
+func (r *ExtensionRegistry) addExtensionsFromMessageLocked(md *desc.MessageDescriptor) {
+	for _, ext := range md.GetNestedExtensions() {
+		r.putExtensionLocked(ext)
+	}
+	for _, msg := range md.GetNestedMessageTypes() {
+		r.addExtensionsFromMessageLocked(msg)
+	}
+}
+
+func (r *ExtensionRegistry) putExtensionLocked(fd *desc.FieldDescriptor) {
+	msgName := fd.GetOwner().GetFullyQualifiedName()
+	m := r.exts[msgName]
+	if m == nil {
+		m = map[int32]*desc.FieldDescriptor{}
+		r.exts[msgName] = m
+	}
+	m[fd.GetNumber()] = fd
+}
+
+// FindExtension queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and tag number. If no extension is known, nil is returned.
+func (r *ExtensionRegistry) FindExtension(messageName string, tagNumber int32) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	fd := r.exts[messageName][tagNumber]
+	if fd == nil && r.includeDefault {
+		ext := getDefaultExtensions(messageName)[tagNumber]
+		if ext != nil {
+			fd, _ = desc.LoadFieldDescriptorForExtension(ext)
+		}
+	}
+	return fd
+}
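+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): building a registry from a file descriptor and resolving an
+// extension by tag. The message name and tag below are placeholders; fd is
+// assumed to be a *desc.FileDescriptor.
+//
+//    er := NewExtensionRegistryWithDefaults()
+//    er.AddExtensionsFromFile(fd)
+//    if extd := er.FindExtension("example.Extendee", 100); extd != nil {
+//        // extd describes the extension with tag 100 on example.Extendee
+//    }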
+
+// FindExtensionByName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and field name (must also be a fully-qualified extension name). If no extension is known, nil
+// is returned.
+func (r *ExtensionRegistry) FindExtensionByName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+// FindExtensionByJSONName queries for the extension field with the given extendee name (must be a fully-qualified
+// message name) and JSON field name (must also be a fully-qualified name). If no extension is known, nil is returned.
+// The fully-qualified JSON name is the same as the extension's normal fully-qualified name except that the last
+// component uses the field's JSON name (if present).
+func (r *ExtensionRegistry) FindExtensionByJSONName(messageName string, fieldName string) *desc.FieldDescriptor {
+	if r == nil {
+		return nil
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	for _, fd := range r.exts[messageName] {
+		if fd.GetFullyQualifiedJSONName() == fieldName {
+			return fd
+		}
+	}
+	if r.includeDefault {
+		for _, ext := range getDefaultExtensions(messageName) {
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd.GetFullyQualifiedJSONName() == fieldName {
+				return fd
+			}
+		}
+	}
+	return nil
+}
+
+func getDefaultExtensions(messageName string) map[int32]*proto.ExtensionDesc {
+	t := proto.MessageType(messageName)
+	if t != nil {
+		msg := reflect.Zero(t).Interface().(proto.Message)
+		return proto.RegisteredExtensions(msg)
+	}
+	return nil
+}
+
+// AllExtensionsForType returns all known extension fields for the given extendee name (must be a
+// fully-qualified message name).
+func (r *ExtensionRegistry) AllExtensionsForType(messageName string) []*desc.FieldDescriptor {
+	if r == nil {
+		return []*desc.FieldDescriptor(nil)
+	}
+	r.mu.RLock()
+	defer r.mu.RUnlock()
+	flds := r.exts[messageName]
+	var ret []*desc.FieldDescriptor
+	if r.includeDefault {
+		exts := getDefaultExtensions(messageName)
+		if len(exts) > 0 || len(flds) > 0 {
+			ret = make([]*desc.FieldDescriptor, 0, len(exts)+len(flds))
+		}
+		for tag, ext := range exts {
+			if _, ok := flds[tag]; ok {
+				// skip default extension and use the one explicitly registered instead
+				continue
+			}
+			fd, _ := desc.LoadFieldDescriptorForExtension(ext)
+			if fd != nil {
+				ret = append(ret, fd)
+			}
+		}
+	} else if len(flds) > 0 {
+		ret = make([]*desc.FieldDescriptor, 0, len(flds))
+	}
+
+	for _, ext := range flds {
+		ret = append(ret, ext)
+	}
+	return ret
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
new file mode 100644
index 0000000..1eaedfa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/grpcdynamic/stub.go
@@ -0,0 +1,303 @@
+// Package grpcdynamic provides a dynamic RPC stub. It can be used to invoke RPC
+// methods where only method descriptors are known. The actual request and response
+// messages may be dynamic messages.
+package grpcdynamic
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/golang/protobuf/proto"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/dynamic"
+)
+
+// Stub is an RPC client stub, used for dynamically dispatching RPCs to a server.
+type Stub struct {
+	channel Channel
+	mf      *dynamic.MessageFactory
+}
+
+// Channel represents the operations necessary to issue RPCs via gRPC. The
+// *grpc.ClientConn type provides this interface and will typically be the concrete
+// type used to construct Stubs. But the use of this interface allows
+// construction of stubs that use alternate concrete types as the transport for
+// RPC operations.
+type Channel interface {
+	Invoke(ctx context.Context, method string, args, reply interface{}, opts ...grpc.CallOption) error
+	NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)
+}
+
+var _ Channel = (*grpc.ClientConn)(nil)
+
+// NewStub creates a new RPC stub that uses the given channel for dispatching RPCs.
+func NewStub(channel Channel) Stub {
+	return NewStubWithMessageFactory(channel, nil)
+}
+
+// NewStubWithMessageFactory creates a new RPC stub that uses the given channel for
+// dispatching RPCs and the given MessageFactory for creating response messages.
+func NewStubWithMessageFactory(channel Channel, mf *dynamic.MessageFactory) Stub {
+	return Stub{channel: channel, mf: mf}
+}
+
+func requestMethod(md *desc.MethodDescriptor) string {
+	return fmt.Sprintf("/%s/%s", md.GetService().GetFullyQualifiedName(), md.GetName())
+}
+
+// InvokeRpc sends a unary RPC and returns the response. Use this for unary methods.
+func (s Stub) InvokeRpc(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (proto.Message, error) {
+	if method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpc is for unary methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(method.GetOutputType())
+	if err := s.channel.Invoke(ctx, requestMethod(method), request, resp, opts...); err != nil {
+		return nil, err
+	}
+	return resp, nil
+}
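+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): invoking a unary method through a dynamic stub. It assumes conn is
+// an established *grpc.ClientConn, mtd is a *desc.MethodDescriptor for a
+// unary method, and req is a message of the method's input type.
+//
+//    stub := NewStub(conn)
+//    resp, err := stub.InvokeRpc(ctx, mtd, req)
+//    if err != nil {
+//        // handle RPC error
+//    }
+//    _ = resp // typically a *dynamic.Message of the method's output type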
+
+// InvokeRpcServerStream sends a single request and returns the response stream. Use this for server-streaming methods.
+func (s Stub) InvokeRpcServerStream(ctx context.Context, method *desc.MethodDescriptor, request proto.Message, opts ...grpc.CallOption) (*ServerStream, error) {
+	if method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcServerStream is for server-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	if err := checkMessageType(method.GetInputType(), request); err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		err = cs.SendMsg(request)
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+		err = cs.CloseSend()
+		if err != nil {
+			cancel()
+			return nil, err
+		}
+		return &ServerStream{cs, method.GetOutputType(), s.mf}, nil
+	}
+}
+
+// InvokeRpcClientStream creates a new stream that is used to send request messages and, at the end,
+// receive the response message. Use this for client-streaming methods.
+func (s Stub) InvokeRpcClientStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*ClientStream, error) {
+	if !method.IsClientStreaming() || method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcClientStream is for client-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		return &ClientStream{cs, method, s.mf, cancel}, nil
+	}
+}
+
+// InvokeRpcBidiStream creates a new stream that is used to both send request messages and receive response
+// messages. Use this for bidi-streaming methods.
+func (s Stub) InvokeRpcBidiStream(ctx context.Context, method *desc.MethodDescriptor, opts ...grpc.CallOption) (*BidiStream, error) {
+	if !method.IsClientStreaming() || !method.IsServerStreaming() {
+		return nil, fmt.Errorf("InvokeRpcBidiStream is for bidi-streaming methods; %q is %s", method.GetFullyQualifiedName(), methodType(method))
+	}
+	sd := grpc.StreamDesc{
+		StreamName:    method.GetName(),
+		ServerStreams: method.IsServerStreaming(),
+		ClientStreams: method.IsClientStreaming(),
+	}
+	if cs, err := s.channel.NewStream(ctx, &sd, requestMethod(method), opts...); err != nil {
+		return nil, err
+	} else {
+		return &BidiStream{cs, method.GetInputType(), method.GetOutputType(), s.mf}, nil
+	}
+}
+
+func methodType(md *desc.MethodDescriptor) string {
+	if md.IsClientStreaming() && md.IsServerStreaming() {
+		return "bidi-streaming"
+	} else if md.IsClientStreaming() {
+		return "client-streaming"
+	} else if md.IsServerStreaming() {
+		return "server-streaming"
+	} else {
+		return "unary"
+	}
+}
+
+func checkMessageType(md *desc.MessageDescriptor, msg proto.Message) error {
+	var typeName string
+	if dm, ok := msg.(*dynamic.Message); ok {
+		typeName = dm.GetMessageDescriptor().GetFullyQualifiedName()
+	} else {
+		typeName = proto.MessageName(msg)
+	}
+	if typeName != md.GetFullyQualifiedName() {
+		return fmt.Errorf("expecting message of type %s; got %s", md.GetFullyQualifiedName(), typeName)
+	}
+	return nil
+}
+
+// ServerStream represents a response stream from a server. Messages in the stream can be queried
+// as can header and trailer metadata sent by the server.
+type ServerStream struct {
+	stream   grpc.ClientStream
+	respType *desc.MessageDescriptor
+	mf       *dynamic.MessageFactory
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ServerStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+func (s *ServerStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ServerStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *ServerStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
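+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): draining a server stream until normal completion. It assumes stub,
+// mtd, and req are set up as for a unary call, but mtd describes a
+// server-streaming method.
+//
+//    ss, err := stub.InvokeRpcServerStream(ctx, mtd, req)
+//    if err != nil {
+//        // handle error
+//    }
+//    for {
+//        msg, err := ss.RecvMsg()
+//        if err == io.EOF {
+//            break // stream completed normally
+//        } else if err != nil {
+//            break // stream terminated abnormally
+//        }
+//        _ = msg // process the response message
+//    }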
+
+// ClientStream represents a request stream to a server. Messages in the stream can be sent
+// and, when done, the unary server response and header and trailer metadata can be queried.
+type ClientStream struct {
+	stream grpc.ClientStream
+	method *desc.MethodDescriptor
+	mf     *dynamic.MessageFactory
+	cancel context.CancelFunc
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *ClientStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// CloseAndReceive has returned (the trailer is available once the stream has completed).
+func (s *ClientStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *ClientStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server.
+func (s *ClientStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.method.GetInputType(), m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseAndReceive closes the outgoing request stream and then blocks for the server's response.
+func (s *ClientStream) CloseAndReceive() (proto.Message, error) {
+	if err := s.stream.CloseSend(); err != nil {
+		return nil, err
+	}
+	resp := s.mf.NewMessage(s.method.GetOutputType())
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	}
+	// make sure we get EOF for a second message
+	if err := s.stream.RecvMsg(resp); err != io.EOF {
+		if err == nil {
+			s.cancel()
+			return nil, fmt.Errorf("client-streaming method %q returned more than one response message", s.method.GetFullyQualifiedName())
+		} else {
+			return nil, err
+		}
+	}
+	return resp, nil
+}
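+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): a client-streaming call that sends several requests and then reads
+// the single response. It assumes mtd describes a client-streaming method and
+// requests holds messages of the method's input type.
+//
+//    cs, err := stub.InvokeRpcClientStream(ctx, mtd)
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, req := range requests {
+//        if err := cs.SendMsg(req); err != nil {
+//            break // the server may have terminated the stream early
+//        }
+//    }
+//    resp, err := cs.CloseAndReceive()
+//    _, _ = resp, err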
+
+// BidiStream represents a bi-directional stream for sending messages to and receiving
+// messages from a server. The header and trailer metadata sent by the server can also be
+// queried.
+type BidiStream struct {
+	stream   grpc.ClientStream
+	reqType  *desc.MessageDescriptor
+	respType *desc.MessageDescriptor
+	mf       *dynamic.MessageFactory
+}
+
+// Header returns any header metadata sent by the server (blocks if necessary until headers are
+// received).
+func (s *BidiStream) Header() (metadata.MD, error) {
+	return s.stream.Header()
+}
+
+// Trailer returns the trailer metadata sent by the server. It must only be called after
+// RecvMsg returns a non-nil error (which may be EOF for normal completion of stream).
+func (s *BidiStream) Trailer() metadata.MD {
+	return s.stream.Trailer()
+}
+
+// Context returns the context associated with this streaming operation.
+func (s *BidiStream) Context() context.Context {
+	return s.stream.Context()
+}
+
+// SendMsg sends a request message to the server.
+func (s *BidiStream) SendMsg(m proto.Message) error {
+	if err := checkMessageType(s.reqType, m); err != nil {
+		return err
+	}
+	return s.stream.SendMsg(m)
+}
+
+// CloseSend indicates the request stream has ended. Invoke this after all request messages
+// are sent (even if there are zero such messages).
+func (s *BidiStream) CloseSend() error {
+	return s.stream.CloseSend()
+}
+
+// RecvMsg returns the next message in the response stream or an error. If the stream
+// has completed normally, the error is io.EOF. Otherwise, the error indicates the
+// nature of the abnormal termination of the stream.
+func (s *BidiStream) RecvMsg() (proto.Message, error) {
+	resp := s.mf.NewMessage(s.respType)
+	if err := s.stream.RecvMsg(resp); err != nil {
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
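+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): a simple bidi exchange that sends all requests first and then
+// drains the responses. It assumes mtd describes a bidi-streaming method.
+//
+//    bs, err := stub.InvokeRpcBidiStream(ctx, mtd)
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, req := range requests {
+//        if err := bs.SendMsg(req); err != nil {
+//            break
+//        }
+//    }
+//    _ = bs.CloseSend() // no more requests
+//    for {
+//        msg, err := bs.RecvMsg()
+//        if err != nil {
+//            break // io.EOF signals normal completion
+//        }
+//        _ = msg
+//    }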
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/indent.go b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
new file mode 100644
index 0000000..bd7fcaa
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/indent.go
@@ -0,0 +1,76 @@
+package dynamic
+
+import "bytes"
+
+type indentBuffer struct {
+	bytes.Buffer
+	indent      string
+	indentCount int
+	comma       bool
+}
+
+func (b *indentBuffer) start() error {
+	if b.indentCount >= 0 {
+		b.indentCount++
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) sep() error {
+	if b.indentCount >= 0 {
+		_, err := b.WriteString(": ")
+		return err
+	} else {
+		return b.WriteByte(':')
+	}
+}
+
+func (b *indentBuffer) end() error {
+	if b.indentCount >= 0 {
+		b.indentCount--
+		return b.newLine(false)
+	}
+	return nil
+}
+
+func (b *indentBuffer) maybeNext(first *bool) error {
+	if *first {
+		*first = false
+		return nil
+	} else {
+		return b.next()
+	}
+}
+
+func (b *indentBuffer) next() error {
+	if b.indentCount >= 0 {
+		return b.newLine(b.comma)
+	} else if b.comma {
+		return b.WriteByte(',')
+	} else {
+		return b.WriteByte(' ')
+	}
+}
+
+func (b *indentBuffer) newLine(comma bool) error {
+	if comma {
+		err := b.WriteByte(',')
+		if err != nil {
+			return err
+		}
+	}
+
+	err := b.WriteByte('\n')
+	if err != nil {
+		return err
+	}
+
+	for i := 0; i < b.indentCount; i++ {
+		_, err := b.WriteString(b.indent)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/json.go b/vendor/github.com/jhump/protoreflect/dynamic/json.go
new file mode 100644
index 0000000..f79b4ac
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/json.go
@@ -0,0 +1,1201 @@
+package dynamic
+
+// JSON marshalling and unmarshalling for dynamic messages
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	// link in the well-known-types that have a special JSON format
+	_ "github.com/golang/protobuf/ptypes/any"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "github.com/golang/protobuf/ptypes/empty"
+	_ "github.com/golang/protobuf/ptypes/struct"
+	_ "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/golang/protobuf/ptypes/wrappers"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+var wellKnownTypeNames = map[string]struct{}{
+	"google.protobuf.Any":       {},
+	"google.protobuf.Empty":     {},
+	"google.protobuf.Duration":  {},
+	"google.protobuf.Timestamp": {},
+	// struct.proto
+	"google.protobuf.Struct":    {},
+	"google.protobuf.Value":     {},
+	"google.protobuf.ListValue": {},
+	// wrappers.proto
+	"google.protobuf.DoubleValue": {},
+	"google.protobuf.FloatValue":  {},
+	"google.protobuf.Int64Value":  {},
+	"google.protobuf.UInt64Value": {},
+	"google.protobuf.Int32Value":  {},
+	"google.protobuf.UInt32Value": {},
+	"google.protobuf.BoolValue":   {},
+	"google.protobuf.StringValue": {},
+	"google.protobuf.BytesValue":  {},
+}
+
+// MarshalJSON serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a compact form: no newlines, and spaces between fields and
+// between field identifiers and values are elided.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSON() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{})
+}
+
+// MarshalJSONIndent serializes this message to bytes in JSON format, returning
+// an error if the operation fails. The resulting bytes will be a valid UTF8
+// string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values. Indentation of two spaces is
+// used.
+//
+// This method is convenient shorthand for invoking MarshalJSONPB with a default
+// (zero value) marshaler:
+//
+//    m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+//
+// So enums are serialized using enum value name strings, and values that are
+// not present (including those with default/zero value for messages defined in
+// "proto3" syntax) are omitted.
+func (m *Message) MarshalJSONIndent() ([]byte, error) {
+	return m.MarshalJSONPB(&jsonpb.Marshaler{Indent: "  "})
+}
+
+// MarshalJSONPB serializes this message to bytes in JSON format, returning an
+// error if the operation fails. The resulting bytes will be a valid UTF8
+// string. The given marshaler is used to convey options used during marshaling.
+//
+// If this message contains nested messages that are generated message types (as
+// opposed to dynamic messages), the given marshaler is used to marshal them.
+//
+// When marshaling any nested messages, any jsonpb.AnyResolver configured in the
+// given marshaler is augmented with knowledge of message types known to this
+// message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) MarshalJSONPB(opts *jsonpb.Marshaler) ([]byte, error) {
+	var b indentBuffer
+	b.indent = opts.Indent
+	if len(opts.Indent) == 0 {
+		b.indentCount = -1
+	}
+	b.comma = true
+	if err := m.marshalJSON(&b, opts); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
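+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): marshaling a dynamic message with explicit jsonpb options.
+//
+//    js, err := m.MarshalJSONPB(&jsonpb.Marshaler{
+//        Indent:       "  ", // pretty-print
+//        EmitDefaults: true, // include zero-valued fields
+//        OrigName:     true, // use declared field names instead of JSON names
+//    })
+//    _, _ = js, err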
+
+func (m *Message) marshalJSON(b *indentBuffer, opts *jsonpb.Marshaler) error {
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := marshalWellKnownType(m, b, opts); ok {
+		return err
+	}
+
+	err := b.WriteByte('{')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	var tags []int
+	if opts.EmitDefaults {
+		tags = m.allKnownFieldTags()
+	} else {
+		tags = m.knownFieldTags()
+	}
+
+	first := true
+
+	for _, tag := range tags {
+		itag := int32(tag)
+		fd := m.FindFieldDescriptor(itag)
+
+		v, ok := m.values[itag]
+		if !ok {
+			if fd.GetOneOf() != nil {
+				// don't print defaults for fields in a oneof
+				continue
+			}
+			v = fd.GetDefaultValue()
+		}
+
+		err := b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		err = marshalKnownFieldJSON(b, fd, v, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	err = b.WriteByte('}')
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func marshalWellKnownType(m *Message, b *indentBuffer, opts *jsonpb.Marshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// should not happen: every well-known type is registered with the proto package
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// convert dynamic message to well-known type and let jsonpb marshal it
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := m.MergeInto(msg); err != nil {
+		return true, err
+	}
+	return true, opts.Marshal(b, msg)
+}
+
+func marshalKnownFieldJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	var jsonName string
+	if opts.OrigName {
+		jsonName = fd.GetName()
+	} else {
+		jsonName = fd.AsFieldDescriptorProto().GetJsonName()
+		if jsonName == "" {
+			jsonName = fd.GetName()
+		}
+	}
+	if fd.IsExtension() {
+		var scope string
+		switch parent := fd.GetParent().(type) {
+		case *desc.FileDescriptor:
+			scope = parent.GetPackage()
+		default:
+			scope = parent.GetFullyQualifiedName()
+		}
+		if scope == "" {
+			jsonName = fmt.Sprintf("[%s]", jsonName)
+		} else {
+			jsonName = fmt.Sprintf("[%s.%s]", scope, jsonName)
+		}
+	}
+	err := writeJsonString(b, jsonName)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	if isNil(v) {
+		_, err := b.WriteString("null")
+		return err
+	}
+
+	if fd.IsMap() {
+		err = b.WriteByte('{')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		md := fd.GetMessageType()
+		vfd := md.FindFieldByNumber(2)
+
+		mp := v.(map[interface{}]interface{})
+		keys := make([]interface{}, 0, len(mp))
+		for k := range mp {
+			keys = append(keys, k)
+		}
+		sort.Sort(sortable(keys))
+		first := true
+		for _, mk := range keys {
+			mv := mp[mk]
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+
+			err = marshalKnownFieldMapEntryJSON(b, mk, vfd, mv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte('}')
+
+	} else if fd.IsRepeated() {
+		err = b.WriteByte('[')
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+
+		sl := v.([]interface{})
+		first := true
+		for _, slv := range sl {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldValueJSON(b, fd, slv, opts)
+			if err != nil {
+				return err
+			}
+		}
+
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		return b.WriteByte(']')
+
+	} else {
+		return marshalKnownFieldValueJSON(b, fd, v, opts)
+	}
+}
+
+func isNil(v interface{}) bool {
+	if v == nil {
+		return true
+	}
+	rv := reflect.ValueOf(v)
+	return rv.Kind() == reflect.Ptr && rv.IsNil()
+}
+
+func marshalKnownFieldMapEntryJSON(b *indentBuffer, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}, opts *jsonpb.Marshaler) error {
+	rk := reflect.ValueOf(mk)
+	var strkey string
+	switch rk.Kind() {
+	case reflect.Bool:
+		strkey = strconv.FormatBool(rk.Bool())
+	case reflect.Int32, reflect.Int64:
+		strkey = strconv.FormatInt(rk.Int(), 10)
+	case reflect.Uint32, reflect.Uint64:
+		strkey = strconv.FormatUint(rk.Uint(), 10)
+	case reflect.String:
+		strkey = rk.String()
+	default:
+		return fmt.Errorf("invalid map key value: %v (%v)", mk, rk.Type())
+	}
+	err := writeString(b, strkey)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+	return marshalKnownFieldValueJSON(b, vfd, mv, opts)
+}
+
+func marshalKnownFieldValueJSON(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}, opts *jsonpb.Marshaler) error {
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int32, reflect.Int64:
+		ed := fd.GetEnumType()
+		if !opts.EnumsAsInts && ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				return writeJsonString(b, vd.GetName())
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint32, reflect.Uint64:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = `"NaN"`
+		} else if math.IsInf(f, 1) {
+			str = `"Infinity"`
+		} else if math.IsInf(f, -1) {
+			str = `"-Infinity"`
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		bstr := base64.StdEncoding.EncodeToString(rv.Bytes())
+		return writeJsonString(b, bstr)
+	case reflect.String:
+		return writeJsonString(b, rv.String())
+	default:
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			return dm.marshalJSON(b, opts)
+		} else {
+			var err error
+			if b.indentCount <= 0 || len(b.indent) == 0 {
+				err = opts.Marshal(b, v.(proto.Message))
+			} else {
+				str, err := opts.MarshalToString(v.(proto.Message))
+				if err != nil {
+					return err
+				}
+				indent := strings.Repeat(b.indent, b.indentCount)
+				pos := 0
+				// add indentation prefix to each line
+				for pos < len(str) {
+					start := pos
+					nextPos := strings.Index(str[pos:], "\n")
+					if nextPos == -1 {
+						nextPos = len(str)
+					} else {
+						nextPos = pos + nextPos + 1 // include newline
+					}
+					line := str[start:nextPos]
+					if pos > 0 {
+						_, err = b.WriteString(indent)
+						if err != nil {
+							return err
+						}
+					}
+					_, err = b.WriteString(line)
+					if err != nil {
+						return err
+					}
+					pos = nextPos
+				}
+			}
+			return err
+		}
+	}
+}
+
+func writeJsonString(b *indentBuffer, s string) error {
+	if sbytes, err := json.Marshal(s); err != nil {
+		return err
+	} else {
+		_, err := b.Write(sbytes)
+		return err
+	}
+}
+
+// UnmarshalJSON de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// This method is shorthand for invoking UnmarshalJSONPB with a default (zero
+// value) unmarshaler:
+//
+//    m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+//
+// So unknown fields will result in an error, and no provided jsonpb.AnyResolver
+// will be used when parsing google.protobuf.Any messages.
+func (m *Message) UnmarshalJSON(js []byte) error {
+	return m.UnmarshalJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalMergeJSON de-serializes the message that is present, in JSON format,
+// in the given bytes into this message. Unlike UnmarshalJSON, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSON(js []byte) error {
+	return m.UnmarshalMergeJSONPB(&jsonpb.Unmarshaler{}, js)
+}
+
+// UnmarshalJSONPB de-serializes the message that is present, in JSON format, in
+// the given bytes into this message. The given unmarshaler conveys options used
+// when parsing the JSON. This function first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in JSON format.
+//
+// The decoding is lenient:
+//  1. The JSON can refer to fields either by their JSON name or by their
+//     declared name.
+//  2. The JSON can use either numeric values or string names for enum values.
+//
+// When instantiating nested messages, if this message's associated factory
+// returns a generated message type (as opposed to a dynamic message), the given
+// unmarshaler is used to unmarshal it.
+//
+// When unmarshaling any nested messages, any jsonpb.AnyResolver configured in
+// the given unmarshaler is augmented with knowledge of message types known to
+// this message's descriptor (and its enclosing file and set of transitive
+// dependencies).
+func (m *Message) UnmarshalJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeJSONPB(opts, js); err != nil {
+		return err
+	}
+	return m.Validate()
+}
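+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): tolerant unmarshaling that ignores unknown JSON fields.
+//
+//    u := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+//    if err := m.UnmarshalJSONPB(u, js); err != nil {
+//        // js was not a valid encoding of m's message type, or a required
+//        // field (proto2 only) was missing
+//    }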
+
+// UnmarshalMergeJSONPB de-serializes the message that is present, in JSON
+// format, in the given bytes into this message. The given unmarshaler conveys
+// options used when parsing the JSON. Unlike UnmarshalJSONPB, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeJSONPB(opts *jsonpb.Unmarshaler, js []byte) error {
+	r := newJsReader(js)
+	err := m.unmarshalJson(r, opts)
+	if err != nil {
+		return err
+	}
+	if t, err := r.poll(); err != io.EOF {
+		b, _ := ioutil.ReadAll(r.unread())
+		s := fmt.Sprintf("%v%s", t, string(b))
+		return fmt.Errorf("superfluous data found after JSON object: %q", s)
+	}
+	return nil
+}
+
+func unmarshalWellKnownType(m *Message, r *jsReader, opts *jsonpb.Unmarshaler) (bool, error) {
+	fqn := m.md.GetFullyQualifiedName()
+	if _, ok := wellKnownTypeNames[fqn]; !ok {
+		return false, nil
+	}
+
+	msgType := proto.MessageType(fqn)
+	if msgType == nil {
+		// should not happen: every well-known type is registered with the proto package
+		panic(fmt.Sprintf("could not find registered message type for %q", fqn))
+	}
+
+	// extract json value from r
+	var js json.RawMessage
+	if err := json.NewDecoder(r.unread()).Decode(&js); err != nil {
+		return true, err
+	}
+	if err := r.skip(); err != nil {
+		return true, err
+	}
+
+	// unmarshal into well-known type and then convert to dynamic message
+	msg := reflect.New(msgType.Elem()).Interface().(proto.Message)
+	if err := opts.Unmarshal(bytes.NewReader(js), msg); err != nil {
+		return true, err
+	}
+	return true, m.MergeFrom(msg)
+}
+
+func (m *Message) unmarshalJson(r *jsReader, opts *jsonpb.Unmarshaler) error {
+	if r, changed := wrapResolver(opts.AnyResolver, m.mf, m.md.GetFile()); changed {
+		newOpts := *opts
+		newOpts.AnyResolver = r
+		opts = &newOpts
+	}
+
+	if ok, err := unmarshalWellKnownType(m, r, opts); ok {
+		return err
+	}
+
+	t, err := r.peek()
+	if err != nil {
+		return err
+	}
+	if t == nil {
+		// if json is simply "null" we do nothing
+		r.poll()
+		return nil
+	}
+
+	if err := r.beginObject(); err != nil {
+		return err
+	}
+
+	for r.hasNext() {
+		f, err := r.nextObjectKey()
+		if err != nil {
+			return err
+		}
+		fd := m.FindFieldDescriptorByJSONName(f)
+		if fd == nil {
+			if opts.AllowUnknownFields {
+				r.skip()
+				continue
+			}
+			return fmt.Errorf("message type %s has no known field named %s", m.md.GetFullyQualifiedName(), f)
+		}
+		v, err := unmarshalJsField(fd, r, m.mf, opts)
+		if err != nil {
+			return err
+		}
+		if v != nil {
+			if err := mergeField(m, fd, v); err != nil {
+				return err
+			}
+		} else if fd.GetOneOf() != nil {
+			// preserve explicit null for oneof fields (this is a little odd but
+			// mimics the behavior of jsonpb with oneofs in generated message types)
+			if fd.GetMessageType() != nil {
+				typ := m.mf.GetKnownTypeRegistry().GetKnownType(fd.GetMessageType().GetFullyQualifiedName())
+				if typ != nil {
+					// typed nil
+					if typ.Kind() != reflect.Ptr {
+						typ = reflect.PtrTo(typ)
+					}
+					v = reflect.Zero(typ).Interface()
+				} else {
+					// can't use a nil dynamic message, so we just use an empty one instead
+					v = m.mf.NewDynamicMessage(fd.GetMessageType())
+				}
+				if err := m.setField(fd, v); err != nil {
+					return err
+				}
+			} else {
+				// not a message... explicit null makes no sense
+				return fmt.Errorf("message type %s cannot set field %s to null: it is not a message type", m.md.GetFullyQualifiedName(), f)
+			}
+		} else {
+			m.clearField(fd)
+		}
+	}
+
+	if err := r.endObject(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func isWellKnownValue(fd *desc.FieldDescriptor) bool {
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.Value"
+}
+
+func isWellKnownListValue(fd *desc.FieldDescriptor) bool {
+	return !fd.IsRepeated() && fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE &&
+		fd.GetMessageType().GetFullyQualifiedName() == "google.protobuf.ListValue"
+}
+
+func unmarshalJsField(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && !isWellKnownValue(fd) {
+		// if value is null, just return nil
+		// (unless field is google.protobuf.Value, in which case
+		// we fall through to parse it as an instance where its
+		// underlying value is set to a NullValue)
+		r.poll()
+		return nil, nil
+	}
+
+	if t == json.Delim('{') && fd.IsMap() {
+		entryType := fd.GetMessageType()
+		keyType := entryType.FindFieldByNumber(1)
+		valueType := entryType.FindFieldByNumber(2)
+		mp := map[interface{}]interface{}{}
+
+		// TODO: if there are just two map keys "key" and "value" and they have the right type of values,
+		// treat this JSON object as a single map entry message. (In keeping with support of map fields as
+		// if they were normal repeated field of entry messages as well as supporting a transition from
+		// optional to repeated...)
+
+		if err := r.beginObject(); err != nil {
+			return nil, err
+		}
+		for r.hasNext() {
+			kk, err := unmarshalJsFieldElement(keyType, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			vv, err := unmarshalJsFieldElement(valueType, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			mp[kk] = vv
+		}
+		if err := r.endObject(); err != nil {
+			return nil, err
+		}
+
+		return mp, nil
+	} else if t == json.Delim('[') && !isWellKnownListValue(fd) {
+		// We support parsing an array, even if field is not repeated, to mimic support in proto
+		// binary wire format that supports changing an optional field to repeated and vice versa.
+		// If the field is not repeated, we only keep the last value in the array.
+
+		if err := r.beginArray(); err != nil {
+			return nil, err
+		}
+		var sl []interface{}
+		var v interface{}
+		for r.hasNext() {
+			var err error
+			v, err = unmarshalJsFieldElement(fd, r, mf, opts)
+			if err != nil {
+				return nil, err
+			}
+			if fd.IsRepeated() && v != nil {
+				sl = append(sl, v)
+			}
+		}
+		if err := r.endArray(); err != nil {
+			return nil, err
+		}
+		if fd.IsMap() {
+			mp := map[interface{}]interface{}{}
+			for _, m := range sl {
+				msg := m.(*Message)
+				kk, err := msg.TryGetFieldByNumber(1)
+				if err != nil {
+					return nil, err
+				}
+				vv, err := msg.TryGetFieldByNumber(2)
+				if err != nil {
+					return nil, err
+				}
+				mp[kk] = vv
+			}
+			return mp, nil
+		} else if fd.IsRepeated() {
+			return sl, nil
+		} else {
+			return v, nil
+		}
+	} else {
+		// We support parsing a singular value, even if field is repeated, to mimic support in proto
+		// binary wire format that supports changing an optional field to repeated and vice versa.
+		// If the field is repeated, we store value as singleton slice of that one value.
+
+		v, err := unmarshalJsFieldElement(fd, r, mf, opts)
+		if err != nil {
+			return nil, err
+		}
+		if v == nil {
+			return nil, nil
+		}
+		if fd.IsRepeated() {
+			return []interface{}{v}, nil
+		} else {
+			return v, nil
+		}
+	}
+}
+
+func unmarshalJsFieldElement(fd *desc.FieldDescriptor, r *jsReader, mf *MessageFactory, opts *jsonpb.Unmarshaler) (interface{}, error) {
+	t, err := r.peek()
+	if err != nil {
+		return nil, err
+	}
+
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+		m := mf.NewMessage(fd.GetMessageType())
+		if dm, ok := m.(*Message); ok {
+			if err := dm.unmarshalJson(r, opts); err != nil {
+				return nil, err
+			}
+		} else {
+			var msg json.RawMessage
+			if err := json.NewDecoder(r.unread()).Decode(&msg); err != nil {
+				return nil, err
+			}
+			if err := r.skip(); err != nil {
+				return nil, err
+			}
+			if err := opts.Unmarshal(bytes.NewReader([]byte(msg)), m); err != nil {
+				return nil, err
+			}
+		}
+		return m, nil
+
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if e, err := r.nextNumber(); err != nil {
+			return nil, err
+		} else {
+			// value could be string or number
+			if i, err := e.Int64(); err != nil {
+				// number cannot be parsed, so see if it's an enum value name
+				vd := fd.GetEnumType().FindValueByName(string(e))
+				if vd != nil {
+					return vd.GetNumber(), nil
+				} else {
+					return nil, fmt.Errorf("enum %q does not have value named %q", fd.GetEnumType().GetFullyQualifiedName(), e)
+				}
+			} else if i > math.MaxInt32 || i < math.MinInt32 {
+				return nil, NumericOverflowError
+			} else {
+				return int32(i), err
+			}
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if i, err := r.nextInt(); err != nil {
+			return nil, err
+		} else if i > math.MaxInt32 || i < math.MinInt32 {
+			return nil, NumericOverflowError
+		} else {
+			return int32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		return r.nextInt()
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if i, err := r.nextUint(); err != nil {
+			return nil, err
+		} else if i > math.MaxUint32 {
+			return nil, NumericOverflowError
+		} else {
+			return uint32(i), err
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		return r.nextUint()
+
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if str, ok := t.(string); ok {
+			if str == "true" {
+				r.poll() // consume token
+				return true, err
+			} else if str == "false" {
+				r.poll() // consume token
+				return false, err
+			}
+		}
+		return r.nextBool()
+
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		if f, err := r.nextFloat(); err != nil {
+			return nil, err
+		} else {
+			return float32(f), nil
+		}
+
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		return r.nextFloat()
+
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return r.nextBytes()
+
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return r.nextString()
+
+	default:
+		return nil, fmt.Errorf("unknown field type: %v", fd.GetType())
+	}
+}
+
+type jsReader struct {
+	reader  *bytes.Reader
+	dec     *json.Decoder
+	current json.Token
+	peeked  bool
+}
+
+func newJsReader(b []byte) *jsReader {
+	reader := bytes.NewReader(b)
+	dec := json.NewDecoder(reader)
+	dec.UseNumber()
+	return &jsReader{reader: reader, dec: dec}
+}
+
+func (r *jsReader) unread() io.Reader {
+	bufs := make([]io.Reader, 3)
+	var peeked []byte
+	if r.peeked {
+		if _, ok := r.current.(json.Delim); ok {
+			peeked = []byte(fmt.Sprintf("%v", r.current))
+		} else {
+			peeked, _ = json.Marshal(r.current)
+		}
+	}
+	readerCopy := *r.reader
+	decCopy := *r.dec
+
+	bufs[0] = bytes.NewReader(peeked)
+	bufs[1] = decCopy.Buffered()
+	bufs[2] = &readerCopy
+	return &concatReader{bufs: bufs}
+}
+
+func (r *jsReader) hasNext() bool {
+	return r.dec.More()
+}
+
+func (r *jsReader) peek() (json.Token, error) {
+	if r.peeked {
+		return r.current, nil
+	}
+	t, err := r.dec.Token()
+	if err != nil {
+		return nil, err
+	}
+	r.peeked = true
+	r.current = t
+	return t, nil
+}
+
+func (r *jsReader) poll() (json.Token, error) {
+	if r.peeked {
+		ret := r.current
+		r.current = nil
+		r.peeked = false
+		return ret, nil
+	}
+	return r.dec.Token()
+}
+
+func (r *jsReader) beginObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('{') }, nil, "start of JSON object: '{'")
+	return err
+}
+
+func (r *jsReader) endObject() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('}') }, nil, "end of JSON object: '}'")
+	return err
+}
+
+func (r *jsReader) beginArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim('[') }, nil, "start of array: '['")
+	return err
+}
+
+func (r *jsReader) endArray() error {
+	_, err := r.expect(func(t json.Token) bool { return t == json.Delim(']') }, nil, "end of array: ']'")
+	return err
+}
+
+func (r *jsReader) nextObjectKey() (string, error) {
+	return r.nextString()
+}
+
+func (r *jsReader) nextString() (string, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(string); return ok }, "", "string")
+	if err != nil {
+		return "", err
+	}
+	return t.(string), nil
+}
+
+func (r *jsReader) nextBytes() ([]byte, error) {
+	str, err := r.nextString()
+	if err != nil {
+		return nil, err
+	}
+	return base64.StdEncoding.DecodeString(str)
+}
+
+func (r *jsReader) nextBool() (bool, error) {
+	t, err := r.expect(func(t json.Token) bool { _, ok := t.(bool); return ok }, false, "boolean")
+	if err != nil {
+		return false, err
+	}
+	return t.(bool), nil
+}
+
+func (r *jsReader) nextInt() (int64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Int64()
+}
+
+func (r *jsReader) nextUint() (uint64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return strconv.ParseUint(string(n), 10, 64)
+}
+
+func (r *jsReader) nextFloat() (float64, error) {
+	n, err := r.nextNumber()
+	if err != nil {
+		return 0, err
+	}
+	return n.Float64()
+}
+
+func (r *jsReader) nextNumber() (json.Number, error) {
+	t, err := r.expect(func(t json.Token) bool { return reflect.TypeOf(t).Kind() == reflect.String }, "0", "number")
+	if err != nil {
+		return "", err
+	}
+	switch t := t.(type) {
+	case json.Number:
+		return t, nil
+	case string:
+		return json.Number(t), nil
+	}
+	return "", fmt.Errorf("expecting a number but got %v", t)
+}
+
+func (r *jsReader) skip() error {
+	t, err := r.poll()
+	if err != nil {
+		return err
+	}
+	if t == json.Delim('[') {
+		if err := r.skipArray(); err != nil {
+			return err
+		}
+	} else if t == json.Delim('{') {
+		if err := r.skipObject(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (r *jsReader) skipArray() error {
+	for r.hasNext() {
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endArray(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) skipObject() error {
+	for r.hasNext() {
+		// skip object key
+		if err := r.skip(); err != nil {
+			return err
+		}
+		// and value
+		if err := r.skip(); err != nil {
+			return err
+		}
+	}
+	if err := r.endObject(); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (r *jsReader) expect(predicate func(json.Token) bool, ifNil interface{}, expected string) (interface{}, error) {
+	t, err := r.poll()
+	if err != nil {
+		return nil, err
+	}
+	if t == nil && ifNil != nil {
+		return ifNil, nil
+	}
+	if !predicate(t) {
+		return t, fmt.Errorf("bad input: expecting %s ; instead got %v", expected, t)
+	}
+	return t, nil
+}
+
+type concatReader struct {
+	bufs []io.Reader
+	curr int
+}
+
+func (r *concatReader) Read(p []byte) (n int, err error) {
+	for {
+		if r.curr >= len(r.bufs) {
+			err = io.EOF
+			return
+		}
+		var c int
+		c, err = r.bufs[r.curr].Read(p)
+		n += c
+		if err != io.EOF {
+			return
+		}
+		r.curr++
+		p = p[c:]
+	}
+}
+
+// AnyResolver returns a jsonpb.AnyResolver that uses the given file descriptors
+// to resolve message names. It uses the given factory, which may be nil, to
+// instantiate messages. The messages that it returns when resolving a type name
+// may often be dynamic messages.
+func AnyResolver(mf *MessageFactory, files ...*desc.FileDescriptor) jsonpb.AnyResolver {
+	return &anyResolver{mf: mf, files: files}
+}
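+
+// Illustrative usage sketch (editor's example, not part of the upstream
+// source): plugging the resolver into jsonpb options so that
+// google.protobuf.Any contents whose types are only known via descriptors can
+// still be marshaled and unmarshaled. Here mf may be nil and fd is assumed to
+// be a *desc.FileDescriptor.
+//
+//    res := AnyResolver(mf, fd)
+//    marshaler := &jsonpb.Marshaler{AnyResolver: res}
+//    unmarshaler := &jsonpb.Unmarshaler{AnyResolver: res}
+//    _, _ = marshaler, unmarshaler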
+
+type anyResolver struct {
+	mf      *MessageFactory
+	files   []*desc.FileDescriptor
+	ignored map[*desc.FileDescriptor]struct{}
+	other   jsonpb.AnyResolver
+}
+
+func wrapResolver(r jsonpb.AnyResolver, mf *MessageFactory, f *desc.FileDescriptor) (jsonpb.AnyResolver, bool) {
+	if r, ok := r.(*anyResolver); ok {
+		if _, ok := r.ignored[f]; ok {
+			// if the current resolver is ignoring this file, it's because another
+			// (upstream) resolver is already handling it, so nothing to do
+			return r, false
+		}
+		for _, file := range r.files {
+			if file == f {
+				// no need to wrap!
+				return r, false
+			}
+		}
+		// ignore files that will be checked by the resolver we're wrapping
+		// (we'll just delegate and let it search those files)
+		ignored := map[*desc.FileDescriptor]struct{}{}
+		for i := range r.ignored {
+			ignored[i] = struct{}{}
+		}
+		ignore(r.files, ignored)
+		return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, ignored: ignored, other: r}, true
+	}
+	return &anyResolver{mf: mf, files: []*desc.FileDescriptor{f}, other: r}, true
+}
+
+func ignore(files []*desc.FileDescriptor, ignored map[*desc.FileDescriptor]struct{}) {
+	for _, f := range files {
+		if _, ok := ignored[f]; ok {
+			continue
+		}
+		ignored[f] = struct{}{}
+		ignore(f.GetDependencies(), ignored)
+	}
+}
+
+func (r *anyResolver) Resolve(typeUrl string) (proto.Message, error) {
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+
+	// see if the user-specified resolver is able to do the job
+	if r.other != nil {
+		msg, err := r.other.Resolve(typeUrl)
+		if err == nil {
+			return msg, nil
+		}
+	}
+
+	// try to find the message in our known set of files
+	checked := map[*desc.FileDescriptor]struct{}{}
+	for _, f := range r.files {
+		md := r.findMessage(f, mname, checked)
+		if md != nil {
+			return r.mf.NewMessage(md), nil
+		}
+	}
+	// failing that, see if the message factory knows about this type
+	var ktr *KnownTypeRegistry
+	if r.mf != nil {
+		ktr = r.mf.ktr
+	} else {
+		ktr = (*KnownTypeRegistry)(nil)
+	}
+	m := ktr.CreateIfKnown(mname)
+	if m != nil {
+		return m, nil
+	}
+
+	// no other resolver to fallback to? mimic default behavior
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+func (r *anyResolver) findMessage(fd *desc.FileDescriptor, msgName string, checked map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	// if this is an ignored descriptor, skip
+	if _, ok := r.ignored[fd]; ok {
+		return nil
+	}
+
+	// bail if we've already checked this file
+	if _, ok := checked[fd]; ok {
+		return nil
+	}
+	checked[fd] = struct{}{}
+
+	// see if this file has the message
+	md := fd.FindMessage(msgName)
+	if md != nil {
+		return md
+	}
+
+	// if not, recursively search the file's imports
+	for _, dep := range fd.GetDependencies() {
+		md = r.findMessage(dep, msgName, checked)
+		if md != nil {
+			return md
+		}
+	}
+	return nil
+}
+
+var _ jsonpb.AnyResolver = (*anyResolver)(nil)
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
new file mode 100644
index 0000000..bb68d7b
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.11.go
@@ -0,0 +1,129 @@
+//+build !go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// Pre-Go-1.12, we must use reflect.Value.MapKeys to reflectively
+// iterate a map. (We can be more efficient in Go 1.12 and up...)
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty, because the
+		// MapKeys() function allocates heavily.
+		return true
+	}
+
+	for _, k := range a.MapKeys() {
+		av := a.MapIndex(k)
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	for _, k := range val.MapKeys() {
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := val.MapIndex(k)
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	for _, k := range src.MapKeys() {
+		if !canConvert(k, kt) {
+			return false
+		}
+		if !canConvert(src.MapIndex(k), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	for _, k := range src.MapKeys() {
+		v := src.MapIndex(k)
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	for _, k := range rv.MapKeys() {
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		v := rv.MapIndex(k)
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
new file mode 100644
index 0000000..f5ffd67
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/maps_1.12.go
@@ -0,0 +1,137 @@
+//+build go1.12
+
+package dynamic
+
+import (
+	"github.com/jhump/protoreflect/desc"
+	"reflect"
+)
+
+// With Go 1.12 and above, we can use reflect.Value.MapRange to iterate
+// over maps more efficiently than using reflect.Value.MapKeys.
+
+func mapsEqual(a, b reflect.Value) bool {
+	if a.Len() != b.Len() {
+		return false
+	}
+	if a.Len() == 0 && b.Len() == 0 {
+		// Optimize the case where maps are frequently empty
+		return true
+	}
+
+	iter := a.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		av := iter.Value()
+		bv := b.MapIndex(k)
+		if !bv.IsValid() {
+			return false
+		}
+		if !fieldsEqual(av.Interface(), bv.Interface()) {
+			return false
+		}
+	}
+	return true
+}
+
+func validFieldValueForMapField(fd *desc.FieldDescriptor, val reflect.Value) (interface{}, error) {
+	// make a defensive copy while we check the contents
+	// (also converts to map[interface{}]interface{} if it's some other type)
+	keyField := fd.GetMessageType().GetFields()[0]
+	valField := fd.GetMessageType().GetFields()[1]
+	m := map[interface{}]interface{}{}
+	iter := val.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		if k.Kind() == reflect.Interface {
+			// unwrap it
+			k = reflect.ValueOf(k.Interface())
+		}
+		kk, err := validFieldValueForRv(keyField, k)
+		if err != nil {
+			return nil, err
+		}
+		v := iter.Value()
+		if v.Kind() == reflect.Interface {
+			// unwrap it
+			v = reflect.ValueOf(v.Interface())
+		}
+		vv, err := validFieldValueForRv(valField, v)
+		if err != nil {
+			return nil, err
+		}
+		m[kk] = vv
+	}
+	return m, nil
+}
+
+func canConvertMap(src reflect.Value, target reflect.Type) bool {
+	kt := target.Key()
+	vt := target.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		if !canConvert(iter.Key(), kt) {
+			return false
+		}
+		if !canConvert(iter.Value(), vt) {
+			return false
+		}
+	}
+	return true
+}
+
+func mergeMapVal(src, target reflect.Value, targetType reflect.Type) error {
+	tkt := targetType.Key()
+	tvt := targetType.Elem()
+	iter := src.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		skt := k.Type()
+		svt := v.Type()
+		var nk, nv reflect.Value
+		if tkt == skt {
+			nk = k
+		} else if tkt.Kind() == reflect.Ptr && tkt.Elem() == skt {
+			nk = k.Addr()
+		} else {
+			nk = reflect.New(tkt).Elem()
+			if err := mergeVal(k, nk); err != nil {
+				return err
+			}
+		}
+		if tvt == svt {
+			nv = v
+		} else if tvt.Kind() == reflect.Ptr && tvt.Elem() == svt {
+			nv = v.Addr()
+		} else {
+			nv = reflect.New(tvt).Elem()
+			if err := mergeVal(v, nv); err != nil {
+				return err
+			}
+		}
+		if target.IsNil() {
+			target.Set(reflect.MakeMap(targetType))
+		}
+		target.SetMapIndex(nk, nv)
+	}
+	return nil
+}
+
+func mergeMapField(m *Message, fd *desc.FieldDescriptor, rv reflect.Value) error {
+	iter := rv.MapRange()
+	for iter.Next() {
+		k := iter.Key()
+		v := iter.Value()
+		if k.Kind() == reflect.Interface && !k.IsNil() {
+			k = k.Elem()
+		}
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			v = v.Elem()
+		}
+		if err := m.putMapField(fd, k.Interface(), v.Interface()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/merge.go b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
new file mode 100644
index 0000000..ce727fd
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/merge.go
@@ -0,0 +1,100 @@
+package dynamic
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// Merge merges the given source message into the given destination message. Use
+// this instead of proto.Merge when one or both of the messages might be a
+// dynamic message. If there is a problem merging the messages, such as the
+// two messages having different types, then this method will panic (just as
+// proto.Merge does).
+func Merge(dst, src proto.Message) {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			panic(err.Error())
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			panic(err.Error())
+		}
+	} else {
+		proto.Merge(dst, src)
+	}
+}
+
+// TryMerge merges the given source message into the given destination message.
+// You can use this instead of proto.Merge when one or both of the messages
+// might be a dynamic message. Unlike proto.Merge, this method will return an
+// error on failure instead of panicking.
+func TryMerge(dst, src proto.Message) error {
+	if dm, ok := dst.(*Message); ok {
+		if err := dm.MergeFrom(src); err != nil {
+			return err
+		}
+	} else if dm, ok := src.(*Message); ok {
+		if err := dm.MergeInto(dst); err != nil {
+			return err
+		}
+	} else {
+		// proto.Merge panics on bad input, so we first verify
+		// inputs and return error instead of panic
+		out := reflect.ValueOf(dst)
+		if out.IsNil() {
+			return errors.New("proto: nil destination")
+		}
+		in := reflect.ValueOf(src)
+		if in.Type() != out.Type() {
+			return errors.New("proto: type mismatch")
+		}
+		proto.Merge(dst, src)
+	}
+	return nil
+}
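+
+// Usage sketch (illustrative only; `md` is assumed to be a
+// *desc.MessageDescriptor shared by both messages, with a string field
+// named "name"):
+//
+//	dst := dynamic.NewMessage(md)
+//	src := dynamic.NewMessage(md)
+//	src.SetFieldByName("name", "example")
+//	if err := dynamic.TryMerge(dst, src); err != nil {
+//		// the messages could not be merged (e.g. mismatched types)
+//	}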
+
+func mergeField(m *Message, fd *desc.FieldDescriptor, val interface{}) error {
+	rv := reflect.ValueOf(val)
+
+	if fd.IsMap() && rv.Kind() == reflect.Map {
+		return mergeMapField(m, fd, rv)
+	}
+
+	if fd.IsRepeated() && rv.Kind() == reflect.Slice && rv.Type() != typeOfBytes {
+		for i := 0; i < rv.Len(); i++ {
+			e := rv.Index(i)
+			if e.Kind() == reflect.Interface && !e.IsNil() {
+				e = e.Elem()
+			}
+			if err := m.addRepeatedField(fd, e.Interface()); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
+	if fd.IsRepeated() {
+		return m.addRepeatedField(fd, val)
+	} else if fd.GetMessageType() == nil {
+		return m.setField(fd, val)
+	}
+
+	// it's a message type, so we want to merge contents
+	var err error
+	if val, err = validFieldValue(fd, val); err != nil {
+		return err
+	}
+
+	existing, _ := m.doGetField(fd, true)
+	if existing != nil && !reflect.ValueOf(existing).IsNil() {
+		return TryMerge(existing.(proto.Message), val.(proto.Message))
+	}
+
+	// no existing message, so just set field
+	m.internalSetField(fd, val)
+	return nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
new file mode 100644
index 0000000..6c54de8
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/message_factory.go
@@ -0,0 +1,189 @@
+package dynamic
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MessageFactory can be used to create new empty message objects. A default instance
+// (without extension registry or known-type registry specified) will always return
+// dynamic messages (e.g. type will be *dynamic.Message) except for "well-known" types.
+// The well-known types include primitive wrapper types and a handful of other special
+// types defined in standard protobuf definitions, like Any, Duration, and Timestamp.
+type MessageFactory struct {
+	er  *ExtensionRegistry
+	ktr *KnownTypeRegistry
+}
+
+// NewMessageFactoryWithExtensionRegistry creates a new message factory where any
+// dynamic messages produced will use the given extension registry to recognize and
+// parse extension fields.
+func NewMessageFactoryWithExtensionRegistry(er *ExtensionRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(er, nil)
+}
+
+// NewMessageFactoryWithKnownTypeRegistry creates a new message factory where the
+// known types, per the given registry, will be returned as normal protobuf messages
+// (e.g. generated structs, instead of dynamic messages).
+func NewMessageFactoryWithKnownTypeRegistry(ktr *KnownTypeRegistry) *MessageFactory {
+	return NewMessageFactoryWithRegistries(nil, ktr)
+}
+
+// NewMessageFactoryWithDefaults creates a new message factory where all "default" types
+// (those for which protoc-generated code is statically linked into the Go program) are
+// known types. If any dynamic messages are produced, they will recognize and parse all
+// "default" extension fields. This is the equivalent of:
+//   NewMessageFactoryWithRegistries(
+//       NewExtensionRegistryWithDefaults(),
+//       NewKnownTypeRegistryWithDefaults())
+func NewMessageFactoryWithDefaults() *MessageFactory {
+	return NewMessageFactoryWithRegistries(NewExtensionRegistryWithDefaults(), NewKnownTypeRegistryWithDefaults())
+}
+
+// NewMessageFactoryWithRegistries creates a new message factory with the given extension
+// and known type registries.
+func NewMessageFactoryWithRegistries(er *ExtensionRegistry, ktr *KnownTypeRegistry) *MessageFactory {
+	return &MessageFactory{
+		er:  er,
+		ktr: ktr,
+	}
+}
+
+// NewMessage creates a new empty message that corresponds to the given descriptor.
+// If the given descriptor describes a "known type" then that type is instantiated.
+// Otherwise, an empty dynamic message is returned.
+func (f *MessageFactory) NewMessage(md *desc.MessageDescriptor) proto.Message {
+	var ktr *KnownTypeRegistry
+	if f != nil {
+		ktr = f.ktr
+	}
+	if m := ktr.CreateIfKnown(md.GetFullyQualifiedName()); m != nil {
+		return m
+	}
+	return NewMessageWithMessageFactory(md, f)
+}
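+
+// Usage sketch (illustrative only; `md` is assumed to be a
+// *desc.MessageDescriptor obtained elsewhere, e.g. via the desc package):
+//
+//	mf := dynamic.NewMessageFactoryWithDefaults()
+//	msg := mf.NewMessage(md)
+//	if dm, ok := msg.(*dynamic.Message); ok {
+//		// md did not correspond to a known generated type,
+//		// so a dynamic message was returned
+//		_ = dm
+//	}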
+
+// NewDynamicMessage creates a new empty dynamic message that corresponds to the given
+// descriptor. This is like f.NewMessage(md) except the known type registry is not
+// consulted so the return value is always a dynamic message.
+//
+// This is also like dynamic.NewMessage(md) except that the returned message will use
+// this factory when creating other messages, like during de-serialization of fields
+// that are themselves message types.
+func (f *MessageFactory) NewDynamicMessage(md *desc.MessageDescriptor) *Message {
+	return NewMessageWithMessageFactory(md, f)
+}
+
+// GetKnownTypeRegistry returns the known type registry that this factory uses to
+// instantiate known (e.g. generated) message types.
+func (f *MessageFactory) GetKnownTypeRegistry() *KnownTypeRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.ktr
+}
+
+// GetExtensionRegistry returns the extension registry that this factory uses to
+// create dynamic messages. The registry is used by dynamic messages to recognize
+// and parse extension fields during de-serialization.
+func (f *MessageFactory) GetExtensionRegistry() *ExtensionRegistry {
+	if f == nil {
+		return nil
+	}
+	return f.er
+}
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+var typeOfWkt = reflect.TypeOf((*wkt)(nil)).Elem()
+
+// KnownTypeRegistry is a registry of known message types, as identified by their
+// fully-qualified name. A known message type is one for which a protoc-generated
+// struct exists, so a dynamic message is not necessary to represent it. A
+// MessageFactory uses a KnownTypeRegistry to decide whether to create a generated
+// struct or a dynamic message. The zero-value registry (including the behavior of
+// a nil pointer) only knows about the "well-known types" in protobuf. These
+// include only the wrapper types and a handful of other special types like Any,
+// Duration, and Timestamp.
+type KnownTypeRegistry struct {
+	excludeWkt     bool
+	includeDefault bool
+	mu             sync.RWMutex
+	types          map[string]reflect.Type
+}
+
+// NewKnownTypeRegistryWithDefaults creates a new registry that knows about all
+// "default" types (those for which protoc-generated code is statically linked
+// into the Go program).
+func NewKnownTypeRegistryWithDefaults() *KnownTypeRegistry {
+	return &KnownTypeRegistry{includeDefault: true}
+}
+
+// NewKnownTypeRegistryWithoutWellKnownTypes creates a new registry that does *not*
+// include the "well-known types" in protobuf. So even well-known types would be
+// represented by a dynamic message.
+func NewKnownTypeRegistryWithoutWellKnownTypes() *KnownTypeRegistry {
+	return &KnownTypeRegistry{excludeWkt: true}
+}
+
+// AddKnownType adds the types of the given messages as known types.
+func (r *KnownTypeRegistry) AddKnownType(kts ...proto.Message) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	if r.types == nil {
+		r.types = map[string]reflect.Type{}
+	}
+	for _, kt := range kts {
+		r.types[proto.MessageName(kt)] = reflect.TypeOf(kt)
+	}
+}
+
+// CreateIfKnown will construct an instance of the given message if it is a known type.
+// If the given name is unknown, nil is returned.
+func (r *KnownTypeRegistry) CreateIfKnown(messageName string) proto.Message {
+	msgType := r.GetKnownType(messageName)
+	if msgType == nil {
+		return nil
+	}
+
+	if msgType.Kind() == reflect.Ptr {
+		return reflect.New(msgType.Elem()).Interface().(proto.Message)
+	} else {
+		return reflect.New(msgType).Elem().Interface().(proto.Message)
+	}
+}
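+
+// Usage sketch (illustrative only; foopb.Foo stands in for any generated
+// message type linked into the program, assumed to be registered as "foo.Foo"):
+//
+//	ktr := dynamic.NewKnownTypeRegistryWithoutWellKnownTypes()
+//	ktr.AddKnownType(&foopb.Foo{})
+//	if m := ktr.CreateIfKnown("foo.Foo"); m != nil {
+//		// m is a *foopb.Foo, not a *dynamic.Message
+//	}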
+
+// GetKnownType will return the reflect.Type for the given message name if it is
+// known. If it is not known, nil is returned.
+func (r *KnownTypeRegistry) GetKnownType(messageName string) reflect.Type {
+	var msgType reflect.Type
+	if r == nil {
+		// a nil registry behaves the same as zero value instance: only know of well-known types
+		t := proto.MessageType(messageName)
+		if t != nil && t.Implements(typeOfWkt) {
+			msgType = t
+		}
+	} else {
+		if r.includeDefault {
+			msgType = proto.MessageType(messageName)
+		} else if !r.excludeWkt {
+			t := proto.MessageType(messageName)
+			if t != nil && t.Implements(typeOfWkt) {
+				msgType = t
+			}
+		}
+		if msgType == nil {
+			r.mu.RLock()
+			msgType = r.types[messageName]
+			r.mu.RUnlock()
+		}
+	}
+
+	return msgType
+}
diff --git a/vendor/github.com/jhump/protoreflect/dynamic/text.go b/vendor/github.com/jhump/protoreflect/dynamic/text.go
new file mode 100644
index 0000000..2d0fa04
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/dynamic/text.go
@@ -0,0 +1,1174 @@
+package dynamic
+
+// Marshalling and unmarshalling of dynamic messages to/from proto's standard text format
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"text/scanner"
+	"unicode"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// MarshalText serializes this message to bytes in the standard text format,
+// returning an error if the operation fails. The resulting bytes will be a
+// valid UTF8 string.
+//
+// This method uses a compact form: no newlines, and spaces between field
+// identifiers and values are elided.
+func (m *Message) MarshalText() ([]byte, error) {
+	var b indentBuffer
+	b.indentCount = -1 // no indentation
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
+
+// MarshalTextIndent serializes this message to bytes in the standard text
+// format, returning an error if the operation fails. The resulting bytes will
+// be a valid UTF8 string.
+//
+// This method uses a "pretty-printed" form, with each field on its own line and
+// spaces between field identifiers and values.
+func (m *Message) MarshalTextIndent() ([]byte, error) {
+	var b indentBuffer
+	b.indent = "  " // TODO: option for indent?
+	if err := m.marshalText(&b); err != nil {
+		return nil, err
+	}
+	return b.Bytes(), nil
+}
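+
+// Usage sketch (illustrative only; `msg` is assumed to be a populated
+// *dynamic.Message):
+//
+//	txt, err := msg.MarshalTextIndent()
+//	if err == nil {
+//		clone := dynamic.NewMessage(msg.GetMessageDescriptor())
+//		err = clone.UnmarshalText(txt) // round-trips the text form
+//	}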
+
+func (m *Message) marshalText(b *indentBuffer) error {
+	// TODO: option for emitting extended Any format?
+	first := true
+	// first the known fields
+	for _, tag := range m.knownFieldTags() {
+		itag := int32(tag)
+		v := m.values[itag]
+		fd := m.FindFieldDescriptor(itag)
+		if fd.IsMap() {
+			md := fd.GetMessageType()
+			kfd := md.FindFieldByNumber(1)
+			vfd := md.FindFieldByNumber(2)
+			mp := v.(map[interface{}]interface{})
+			keys := make([]interface{}, 0, len(mp))
+			for k := range mp {
+				keys = append(keys, k)
+			}
+			sort.Sort(sortable(keys))
+			for _, mk := range keys {
+				mv := mp[mk]
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldMapEntryText(b, fd, kfd, mk, vfd, mv)
+				if err != nil {
+					return err
+				}
+			}
+		} else if fd.IsRepeated() {
+			sl := v.([]interface{})
+			for _, slv := range sl {
+				err := b.maybeNext(&first)
+				if err != nil {
+					return err
+				}
+				err = marshalKnownFieldText(b, fd, slv)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			err = marshalKnownFieldText(b, fd, v)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	// then the unknown fields
+	for _, tag := range m.unknownFieldTags() {
+		itag := int32(tag)
+		ufs := m.unknownFields[itag]
+		for _, uf := range ufs {
+			err := b.maybeNext(&first)
+			if err != nil {
+				return err
+			}
+			_, err = fmt.Fprintf(b, "%d", tag)
+			if err != nil {
+				return err
+			}
+			if uf.Encoding == proto.WireStartGroup {
+				err = b.WriteByte('{')
+				if err != nil {
+					return err
+				}
+				err = b.start()
+				if err != nil {
+					return err
+				}
+				in := newCodedBuffer(uf.Contents)
+				err = marshalUnknownGroupText(b, in, true)
+				if err != nil {
+					return err
+				}
+				err = b.end()
+				if err != nil {
+					return err
+				}
+				err = b.WriteByte('}')
+				if err != nil {
+					return err
+				}
+			} else {
+				err = b.sep()
+				if err != nil {
+					return err
+				}
+				if uf.Encoding == proto.WireBytes {
+					err = writeString(b, string(uf.Contents))
+					if err != nil {
+						return err
+					}
+				} else {
+					_, err = b.WriteString(strconv.FormatUint(uf.Value, 10))
+					if err != nil {
+						return err
+					}
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func marshalKnownFieldMapEntryText(b *indentBuffer, fd *desc.FieldDescriptor, kfd *desc.FieldDescriptor, mk interface{}, vfd *desc.FieldDescriptor, mv interface{}) error {
+	var name string
+	if fd.IsExtension() {
+		name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+	} else {
+		name = fd.GetName()
+	}
+	_, err := b.WriteString(name)
+	if err != nil {
+		return err
+	}
+	err = b.sep()
+	if err != nil {
+		return err
+	}
+
+	err = b.WriteByte('<')
+	if err != nil {
+		return err
+	}
+	err = b.start()
+	if err != nil {
+		return err
+	}
+
+	err = marshalKnownFieldText(b, kfd, mk)
+	if err != nil {
+		return err
+	}
+	err = b.next()
+	if err != nil {
+		return err
+	}
+	err = marshalKnownFieldText(b, vfd, mv)
+	if err != nil {
+		return err
+	}
+
+	err = b.end()
+	if err != nil {
+		return err
+	}
+	return b.WriteByte('>')
+}
+
+func marshalKnownFieldText(b *indentBuffer, fd *desc.FieldDescriptor, v interface{}) error {
+	group := fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP
+	if group {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetMessageType().GetFullyQualifiedName())
+		} else {
+			name = fd.GetMessageType().GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+	} else {
+		var name string
+		if fd.IsExtension() {
+			name = fmt.Sprintf("[%s]", fd.GetFullyQualifiedName())
+		} else {
+			name = fd.GetName()
+		}
+		_, err := b.WriteString(name)
+		if err != nil {
+			return err
+		}
+		err = b.sep()
+		if err != nil {
+			return err
+		}
+	}
+	rv := reflect.ValueOf(v)
+	switch rv.Kind() {
+	case reflect.Int32, reflect.Int64:
+		ed := fd.GetEnumType()
+		if ed != nil {
+			n := int32(rv.Int())
+			vd := ed.FindValueByNumber(n)
+			if vd == nil {
+				_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+				return err
+			} else {
+				_, err := b.WriteString(vd.GetName())
+				return err
+			}
+		} else {
+			_, err := b.WriteString(strconv.FormatInt(rv.Int(), 10))
+			return err
+		}
+	case reflect.Uint32, reflect.Uint64:
+		_, err := b.WriteString(strconv.FormatUint(rv.Uint(), 10))
+		return err
+	case reflect.Float32, reflect.Float64:
+		f := rv.Float()
+		var str string
+		if math.IsNaN(f) {
+			str = "nan"
+		} else if math.IsInf(f, 1) {
+			str = "inf"
+		} else if math.IsInf(f, -1) {
+			str = "-inf"
+		} else {
+			var bits int
+			if rv.Kind() == reflect.Float32 {
+				bits = 32
+			} else {
+				bits = 64
+			}
+			str = strconv.FormatFloat(rv.Float(), 'g', -1, bits)
+		}
+		_, err := b.WriteString(str)
+		return err
+	case reflect.Bool:
+		_, err := b.WriteString(strconv.FormatBool(rv.Bool()))
+		return err
+	case reflect.Slice:
+		return writeString(b, string(rv.Bytes()))
+	case reflect.String:
+		return writeString(b, rv.String())
+	default:
+		var err error
+		if group {
+			err = b.WriteByte('{')
+		} else {
+			err = b.WriteByte('<')
+		}
+		if err != nil {
+			return err
+		}
+		err = b.start()
+		if err != nil {
+			return err
+		}
+		// must be a message
+		if dm, ok := v.(*Message); ok {
+			err = dm.marshalText(b)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = proto.CompactText(b, v.(proto.Message))
+			if err != nil {
+				return err
+			}
+		}
+		err = b.end()
+		if err != nil {
+			return err
+		}
+		if group {
+			return b.WriteByte('}')
+		} else {
+			return b.WriteByte('>')
+		}
+	}
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
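+// For example, a newline byte is written as \n, a double quote as \", and any
+// byte outside the printable ASCII range 0x20-0x7e is written as a three-digit
+// octal escape such as \001.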
+func writeString(b *indentBuffer, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := b.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = b.WriteString("\\n")
+		case '\r':
+			_, err = b.WriteString("\\r")
+		case '\t':
+			_, err = b.WriteString("\\t")
+		case '"':
+			_, err = b.WriteString("\\\"")
+		case '\\':
+			_, err = b.WriteString("\\\\")
+		default:
+			if c >= 0x20 && c < 0x7f {
+				err = b.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(b, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return b.WriteByte('"')
+}
+
+func marshalUnknownGroupText(b *indentBuffer, in *codedBuffer, topLevel bool) error {
+	first := true
+	for {
+		if in.eof() {
+			if topLevel {
+				return nil
+			}
+			// this is a nested message: we are expecting an end-group tag, not EOF!
+			return io.ErrUnexpectedEOF
+		}
+		tag, wireType, err := in.decodeTagAndWireType()
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireEndGroup {
+			return nil
+		}
+		err = b.maybeNext(&first)
+		if err != nil {
+			return err
+		}
+		_, err = fmt.Fprintf(b, "%d", tag)
+		if err != nil {
+			return err
+		}
+		if wireType == proto.WireStartGroup {
+			err = b.WriteByte('{')
+			if err != nil {
+				return err
+			}
+			err = b.start()
+			if err != nil {
+				return err
+			}
+			err = marshalUnknownGroupText(b, in, false)
+			if err != nil {
+				return err
+			}
+			err = b.end()
+			if err != nil {
+				return err
+			}
+			err = b.WriteByte('}')
+			if err != nil {
+				return err
+			}
+			continue
+		} else {
+			err = b.sep()
+			if err != nil {
+				return err
+			}
+			if wireType == proto.WireBytes {
+				contents, err := in.decodeRawBytes(false)
+				if err != nil {
+					return err
+				}
+				err = writeString(b, string(contents))
+				if err != nil {
+					return err
+				}
+			} else {
+				var v uint64
+				switch wireType {
+				case proto.WireVarint:
+					v, err = in.decodeVarint()
+				case proto.WireFixed32:
+					v, err = in.decodeFixed32()
+				case proto.WireFixed64:
+					v, err = in.decodeFixed64()
+				default:
+					return proto.ErrInternalBadWireType
+				}
+				if err != nil {
+					return err
+				}
+				_, err = b.WriteString(strconv.FormatUint(v, 10))
+				if err != nil {
+					return err
+				}
+			}
+		}
+	}
+}
+
+// UnmarshalText de-serializes the message that is present, in text format, in
+// the given bytes into this message. It first resets the current message. It
+// returns an error if the given bytes do not contain a valid encoding of this
+// message type in the standard text format.
+func (m *Message) UnmarshalText(text []byte) error {
+	m.Reset()
+	if err := m.UnmarshalMergeText(text); err != nil {
+		return err
+	}
+	return m.Validate()
+}
+
+// UnmarshalMergeText de-serializes the message that is present, in text format,
+// in the given bytes into this message. Unlike UnmarshalText, it does not first
+// reset the message, instead merging the data in the given bytes into the
+// existing data in this message.
+func (m *Message) UnmarshalMergeText(text []byte) error {
+	return m.unmarshalText(newReader(text), tokenEOF)
+}
+
+func (m *Message) unmarshalText(tr *txtReader, end tokenType) error {
+	for {
+		tok := tr.next()
+		if tok.tokTyp == end {
+			return nil
+		}
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		var fd *desc.FieldDescriptor
+		var extendedAnyType *desc.MessageDescriptor
+		if tok.tokTyp == tokenInt {
+			// tag number (indicates unknown field)
+			tag, err := strconv.ParseInt(tok.val.(string), 10, 32)
+			if err != nil {
+				return err
+			}
+			itag := int32(tag)
+			fd = m.FindFieldDescriptor(itag)
+			if fd == nil {
+				// can't parse the value w/out field descriptor, so skip it
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				} else if tok.tokTyp == tokenOpenBrace {
+					if err := skipMessageText(tr, true); err != nil {
+						return err
+					}
+				} else if tok.tokTyp == tokenColon {
+					if err := skipFieldValueText(tr); err != nil {
+						return err
+					}
+				} else {
+					return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+				}
+				tok = tr.peek()
+				if tok.tokTyp.IsSep() {
+					tr.next() // consume separator
+				}
+				continue
+			}
+		} else {
+			fieldName, err := unmarshalFieldNameText(tr, tok)
+			if err != nil {
+				return err
+			}
+			fd = m.FindFieldDescriptorByName(fieldName)
+			if fd == nil {
+				// See if it's a group name
+				for _, field := range m.md.GetFields() {
+					if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetMessageType().GetName() == fieldName {
+						fd = field
+						break
+					}
+				}
+				if fd == nil {
+					// maybe this is an extended Any
+					if m.md.GetFullyQualifiedName() == "google.protobuf.Any" && fieldName[0] == '[' && strings.Contains(fieldName, "/") {
+						// strip surrounding "[" and "]" and extract type name from URL
+						typeUrl := fieldName[1 : len(fieldName)-1]
+						mname := typeUrl
+						if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+							mname = mname[slash+1:]
+						}
+						// TODO: add a way to weave an AnyResolver to this point
+						extendedAnyType = findMessageDescriptor(mname, m.md.GetFile())
+						if extendedAnyType == nil {
+							return textError(tok, "could not parse Any with unknown type URL %q", fieldName)
+						}
+						// field 1 is "type_url"
+						typeUrlField := m.md.FindFieldByNumber(1)
+						if err := m.TrySetField(typeUrlField, typeUrl); err != nil {
+							return err
+						}
+					} else {
+						// TODO: add a flag to just ignore unrecognized field names
+						return textError(tok, "%q is not a recognized field name of %q", fieldName, m.md.GetFullyQualifiedName())
+					}
+				}
+			}
+		}
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		}
+		if extendedAnyType != nil {
+			// consume optional colon; make sure this is a "start message" token
+			if tok.tokTyp == tokenColon {
+				tok = tr.next()
+				if tok.tokTyp == tokenEOF {
+					return io.ErrUnexpectedEOF
+				}
+			}
+			if tok.tokTyp.EndToken() == tokenError {
+				return textError(tok, "Expecting a '<' or '{'; instead got %q", tok.txt)
+			}
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(extendedAnyType)
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			// now we marshal the message to bytes and store in the Any
+			b, err := g.Marshal()
+			if err != nil {
+				return err
+			}
+			// field 2 is "value"
+			anyValueField := m.md.FindFieldByNumber(2)
+			if err := m.TrySetField(anyValueField, b); err != nil {
+				return err
+			}
+
+		} else if (fd.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP ||
+			fd.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE) &&
+			tok.tokTyp.EndToken() != tokenError {
+
+			// TODO: use mf.NewMessage and, if not a dynamic message, use proto.UnmarshalText to unmarshal it
+			g := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := g.unmarshalText(tr, tok.tokTyp.EndToken()); err != nil {
+				return err
+			}
+			if fd.IsRepeated() {
+				if err := m.TryAddRepeatedField(fd, g); err != nil {
+					return err
+				}
+			} else {
+				if err := m.TrySetField(fd, g); err != nil {
+					return err
+				}
+			}
+		} else {
+			if tok.tokTyp != tokenColon {
+				return textError(tok, "Expecting a colon ':'; instead got %q", tok.txt)
+			}
+			if err := m.unmarshalFieldValueText(fd, tr); err != nil {
+				return err
+			}
+		}
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+
+func findMessageDescriptor(name string, fd *desc.FileDescriptor) *desc.MessageDescriptor {
+	md := findMessageInTransitiveDeps(name, fd, map[*desc.FileDescriptor]struct{}{})
+	if md == nil {
+		// couldn't find it; see if we have this message linked in
+		md, _ = desc.LoadMessageDescriptor(name)
+	}
+	return md
+}
+
+func findMessageInTransitiveDeps(name string, fd *desc.FileDescriptor, seen map[*desc.FileDescriptor]struct{}) *desc.MessageDescriptor {
+	if _, ok := seen[fd]; ok {
+		// already checked this file
+		return nil
+	}
+	seen[fd] = struct{}{}
+	md := fd.FindMessage(name)
+	if md != nil {
+		return md
+	}
+	// not in this file so recursively search its deps
+	for _, dep := range fd.GetDependencies() {
+		md = findMessageInTransitiveDeps(name, dep, seen)
+		if md != nil {
+			return md
+		}
+	}
+	// couldn't find it
+	return nil
+}
+
+func textError(tok *token, format string, args ...interface{}) error {
+	var msg string
+	if tok.tokTyp == tokenError {
+		msg = tok.val.(error).Error()
+	} else {
+		msg = fmt.Sprintf(format, args...)
+	}
+	return fmt.Errorf("line %d, col %d: %s", tok.pos.Line, tok.pos.Column, msg)
+}
+
+type setFunction func(*Message, *desc.FieldDescriptor, interface{}) error
+
+func (m *Message) unmarshalFieldValueText(fd *desc.FieldDescriptor, tr *txtReader) error {
+	var set setFunction
+	if fd.IsRepeated() {
+		set = (*Message).addRepeatedField
+	} else {
+		set = mergeField
+	}
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := m.unmarshalFieldElementText(fd, tr, set); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+		}
+	}
+	return m.unmarshalFieldElementText(fd, tr, set)
+}
+
+func (m *Message) unmarshalFieldElementText(fd *desc.FieldDescriptor, tr *txtReader, set setFunction) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	}
+
+	var expected string
+	switch fd.GetType() {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		if tok.tokTyp == tokenIdent {
+			if tok.val.(string) == "true" {
+				return set(m, fd, true)
+			} else if tok.val.(string) == "false" {
+				return set(m, fd, false)
+			}
+		}
+		expected = "boolean value"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, []byte(tok.val.(string)))
+		}
+		expected = "bytes string value"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		if tok.tokTyp == tokenString {
+			return set(m, fd, tok.val)
+		}
+		expected = "string value"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, float32(tok.val.(float64)))
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, float32(f))
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, float32(math.Inf(1)))
+			} else if ident == "nan" {
+				return set(m, fd, float32(math.NaN()))
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, float32(math.Inf(-1)))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		switch tok.tokTyp {
+		case tokenFloat:
+			return set(m, fd, tok.val)
+		case tokenInt:
+			if f, err := strconv.ParseFloat(tok.val.(string), 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, f)
+			}
+		case tokenIdent:
+			ident := strings.ToLower(tok.val.(string))
+			if ident == "inf" {
+				return set(m, fd, math.Inf(1))
+			} else if ident == "nan" {
+				return set(m, fd, math.NaN())
+			}
+		case tokenMinus:
+			peeked := tr.peek()
+			if peeked.tokTyp == tokenIdent {
+				ident := strings.ToLower(peeked.val.(string))
+				if ident == "inf" {
+					tr.next() // consume peeked token
+					return set(m, fd, math.Inf(-1))
+				}
+			}
+		}
+		expected = "float value"
+	case descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_SINT64,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, uint32(i))
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseUint(tok.val.(string), 10, 64); err != nil {
+				return err
+			} else {
+				return set(m, fd, i)
+			}
+		}
+		expected = "unsigned int value"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		if tok.tokTyp == tokenIdent {
+			// TODO: add a flag to just ignore unrecognized enum value names?
+			vd := fd.GetEnumType().FindValueByName(tok.val.(string))
+			if vd != nil {
+				return set(m, fd, vd.GetNumber())
+			}
+		} else if tok.tokTyp == tokenInt {
+			if i, err := strconv.ParseInt(tok.val.(string), 10, 32); err != nil {
+				return err
+			} else {
+				return set(m, fd, int32(i))
+			}
+		}
+		expected = fmt.Sprintf("enum %s value", fd.GetEnumType().GetFullyQualifiedName())
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE,
+		descriptor.FieldDescriptorProto_TYPE_GROUP:
+
+		endTok := tok.tokTyp.EndToken()
+		if endTok != tokenError {
+			dm := m.mf.NewDynamicMessage(fd.GetMessageType())
+			if err := dm.unmarshalText(tr, endTok); err != nil {
+				return err
+			}
+			// TODO: ideally we would use mf.NewMessage and, if not a dynamic message, use
+			// proto package to unmarshal it. But the text parser isn't particularly amenable
+			// to that, so we instead convert a dynamic message to a generated one if the
+			// known-type registry knows about the generated type...
+			var ktr *KnownTypeRegistry
+			if m.mf != nil {
+				ktr = m.mf.ktr
+			}
+			pm := ktr.CreateIfKnown(fd.GetMessageType().GetFullyQualifiedName())
+			if pm != nil {
+				if err := dm.ConvertTo(pm); err == nil {
+					return set(m, fd, pm)
+				}
+			}
+			return set(m, fd, dm)
+		}
+		expected = fmt.Sprintf("message %s value", fd.GetMessageType().GetFullyQualifiedName())
+	default:
+		return fmt.Errorf("field %q of message %q has unrecognized type: %v", fd.GetFullyQualifiedName(), m.md.GetFullyQualifiedName(), fd.GetType())
+	}
+
+	// if we get here, token was wrong type; create error message
+	var article string
+	if strings.Contains("aieou", expected[0:1]) {
+		article = "an"
+	} else {
+		article = "a"
+	}
+	return textError(tok, "Expecting %s %s; got %q", article, expected, tok.txt)
+}
+
+func unmarshalFieldNameText(tr *txtReader, tok *token) (string, error) {
+	if tok.tokTyp == tokenOpenBracket || tok.tokTyp == tokenOpenParen {
+		// extension name
+		var closeType tokenType
+		var closeChar string
+		if tok.tokTyp == tokenOpenBracket {
+			closeType = tokenCloseBracket
+			closeChar = "close bracket ']'"
+		} else {
+			closeType = tokenCloseParen
+			closeChar = "close paren ')'"
+		}
+		// must be followed by an identifier
+		idents := make([]string, 0, 1)
+		for {
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp != tokenIdent {
+				return "", textError(tok, "Expecting an identifier; instead got %q", tok.txt)
+			}
+			idents = append(idents, tok.val.(string))
+			// and then close bracket/paren, or "/" to keep adding URL elements to name
+			tok = tr.next()
+			if tok.tokTyp == tokenEOF {
+				return "", io.ErrUnexpectedEOF
+			} else if tok.tokTyp == closeType {
+				break
+			} else if tok.tokTyp != tokenSlash {
+				return "", textError(tok, "Expecting a %s; instead got %q", closeChar, tok.txt)
+			}
+		}
+		return "[" + strings.Join(idents, "/") + "]", nil
+	} else if tok.tokTyp == tokenIdent {
+		// normal field name
+		return tok.val.(string), nil
+	} else {
+		return "", textError(tok, "Expecting an identifier or tag number; instead got %q", tok.txt)
+	}
+}
+
+func skipFieldNameText(tr *txtReader) error {
+	tok := tr.next()
+	if tok.tokTyp == tokenEOF {
+		return io.ErrUnexpectedEOF
+	} else if tok.tokTyp == tokenInt || tok.tokTyp == tokenIdent {
+		return nil
+	} else {
+		_, err := unmarshalFieldNameText(tr, tok)
+		return err
+	}
+}
+
+func skipFieldValueText(tr *txtReader) error {
+	tok := tr.peek()
+	if tok.tokTyp == tokenOpenBracket {
+		tr.next() // consume tok
+		for {
+			if err := skipFieldElementText(tr); err != nil {
+				return err
+			}
+			tok = tr.peek()
+			if tok.tokTyp == tokenCloseBracket {
+				tr.next() // consume tok
+				return nil
+			} else if tok.tokTyp.IsSep() {
+				tr.next() // consume separator
+			}
+
+		}
+	}
+	return skipFieldElementText(tr)
+}
+
+func skipFieldElementText(tr *txtReader) error {
+	tok := tr.next()
+	switch tok.tokTyp {
+	case tokenEOF:
+		return io.ErrUnexpectedEOF
+	case tokenInt, tokenFloat, tokenString, tokenIdent:
+		return nil
+	case tokenOpenAngle:
+		return skipMessageText(tr, false)
+	default:
+		return textError(tok, "Expecting an angle bracket '<' or a value; instead got %q", tok.txt)
+	}
+}
+
+func skipMessageText(tr *txtReader, isGroup bool) error {
+	for {
+		tok := tr.peek()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if isGroup && tok.tokTyp == tokenCloseBrace {
+			return nil
+		} else if !isGroup && tok.tokTyp == tokenCloseAngle {
+			return nil
+		}
+
+		// field name or tag
+		if err := skipFieldNameText(tr); err != nil {
+			return err
+		}
+
+		// field value
+		tok = tr.next()
+		if tok.tokTyp == tokenEOF {
+			return io.ErrUnexpectedEOF
+		} else if tok.tokTyp == tokenOpenBrace {
+			if err := skipMessageText(tr, true); err != nil {
+				return err
+			}
+		} else if tok.tokTyp == tokenColon {
+			if err := skipFieldValueText(tr); err != nil {
+				return err
+			}
+		} else {
+			return textError(tok, "Expecting a colon ':' or brace '{'; instead got %q", tok.txt)
+		}
+
+		tok = tr.peek()
+		if tok.tokTyp.IsSep() {
+			tr.next() // consume separator
+		}
+	}
+}
+
+type tokenType int
+
+const (
+	tokenError tokenType = iota
+	tokenEOF
+	tokenIdent
+	tokenString
+	tokenInt
+	tokenFloat
+	tokenColon
+	tokenComma
+	tokenSemiColon
+	tokenOpenBrace
+	tokenCloseBrace
+	tokenOpenBracket
+	tokenCloseBracket
+	tokenOpenAngle
+	tokenCloseAngle
+	tokenOpenParen
+	tokenCloseParen
+	tokenSlash
+	tokenMinus
+)
+
+func (t tokenType) IsSep() bool {
+	return t == tokenComma || t == tokenSemiColon
+}
+
+func (t tokenType) EndToken() tokenType {
+	switch t {
+	case tokenOpenAngle:
+		return tokenCloseAngle
+	case tokenOpenBrace:
+		return tokenCloseBrace
+	default:
+		return tokenError
+	}
+}
+
+type token struct {
+	tokTyp tokenType
+	val    interface{}
+	txt    string
+	pos    scanner.Position
+}
+
+type txtReader struct {
+	scanner    scanner.Scanner
+	peeked     token
+	havePeeked bool
+}
+
+func newReader(text []byte) *txtReader {
+	sc := scanner.Scanner{}
+	sc.Init(bytes.NewReader(text))
+	sc.Mode = scanner.ScanIdents | scanner.ScanInts | scanner.ScanFloats | scanner.ScanChars |
+		scanner.ScanStrings | scanner.ScanComments | scanner.SkipComments
+	// identifiers are same restrictions as Go identifiers, except we also allow dots since
+	// we accept fully-qualified names
+	sc.IsIdentRune = func(ch rune, i int) bool {
+		return ch == '_' || unicode.IsLetter(ch) ||
+			(i > 0 && unicode.IsDigit(ch)) ||
+			(i > 0 && ch == '.')
+	}
+	// ignore errors; we handle them if/when we see malformed tokens
+	sc.Error = func(s *scanner.Scanner, msg string) {}
+	return &txtReader{scanner: sc}
+}
+
+func (p *txtReader) peek() *token {
+	if p.havePeeked {
+		return &p.peeked
+	}
+	t := p.scanner.Scan()
+	if t == scanner.EOF {
+		p.peeked.tokTyp = tokenEOF
+		p.peeked.val = nil
+		p.peeked.txt = ""
+		p.peeked.pos = p.scanner.Position
+	} else if err := p.processToken(t, p.scanner.TokenText(), p.scanner.Position); err != nil {
+		p.peeked.tokTyp = tokenError
+		p.peeked.val = err
+	}
+	p.havePeeked = true
+	return &p.peeked
+}
+
+func (p *txtReader) processToken(t rune, text string, pos scanner.Position) error {
+	p.peeked.pos = pos
+	p.peeked.txt = text
+	switch t {
+	case scanner.Ident:
+		p.peeked.tokTyp = tokenIdent
+		p.peeked.val = text
+	case scanner.Int:
+		p.peeked.tokTyp = tokenInt
+		p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+	case scanner.Float:
+		p.peeked.tokTyp = tokenFloat
+		var err error
+		if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+			return err
+		}
+	case scanner.Char, scanner.String:
+		p.peeked.tokTyp = tokenString
+		var err error
+		if p.peeked.val, err = strconv.Unquote(text); err != nil {
+			return err
+		}
+	case '-': // unary minus, for negative ints and floats
+		ch := p.scanner.Peek()
+		if ch < '0' || ch > '9' {
+			p.peeked.tokTyp = tokenMinus
+			p.peeked.val = '-'
+		} else {
+			t := p.scanner.Scan()
+			if t == scanner.EOF {
+				return io.ErrUnexpectedEOF
+			} else if t == scanner.Float {
+				p.peeked.tokTyp = tokenFloat
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				var err error
+				if p.peeked.val, err = strconv.ParseFloat(text, 64); err != nil {
+					p.peeked.pos = p.scanner.Position
+					return err
+				}
+			} else if t == scanner.Int {
+				p.peeked.tokTyp = tokenInt
+				text += p.scanner.TokenText()
+				p.peeked.txt = text
+				p.peeked.val = text // can't parse the number because we don't know if it's signed or unsigned
+			} else {
+				p.peeked.pos = p.scanner.Position
+				return fmt.Errorf("expecting an int or float but got %q", p.scanner.TokenText())
+			}
+		}
+	case ':':
+		p.peeked.tokTyp = tokenColon
+		p.peeked.val = ':'
+	case ',':
+		p.peeked.tokTyp = tokenComma
+		p.peeked.val = ','
+	case ';':
+		p.peeked.tokTyp = tokenSemiColon
+		p.peeked.val = ';'
+	case '{':
+		p.peeked.tokTyp = tokenOpenBrace
+		p.peeked.val = '{'
+	case '}':
+		p.peeked.tokTyp = tokenCloseBrace
+		p.peeked.val = '}'
+	case '<':
+		p.peeked.tokTyp = tokenOpenAngle
+		p.peeked.val = '<'
+	case '>':
+		p.peeked.tokTyp = tokenCloseAngle
+		p.peeked.val = '>'
+	case '[':
+		p.peeked.tokTyp = tokenOpenBracket
+		p.peeked.val = '['
+	case ']':
+		p.peeked.tokTyp = tokenCloseBracket
+		p.peeked.val = ']'
+	case '(':
+		p.peeked.tokTyp = tokenOpenParen
+		p.peeked.val = '('
+	case ')':
+		p.peeked.tokTyp = tokenCloseParen
+		p.peeked.val = ')'
+	case '/':
+		// only allowed to separate URL components in expanded Any format
+		p.peeked.tokTyp = tokenSlash
+		p.peeked.val = '/'
+	default:
+		return fmt.Errorf("invalid character: %c", t)
+	}
+	return nil
+}
+
+func (p *txtReader) next() *token {
+	t := p.peek()
+	if t.tokTyp != tokenEOF && t.tokTyp != tokenError {
+		p.havePeeked = false
+	}
+	return t
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/client.go b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
new file mode 100644
index 0000000..3fca3eb
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/client.go
@@ -0,0 +1,666 @@
+package grpcreflect
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"runtime"
+	"sync"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/codes"
+	rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha"
+	"google.golang.org/grpc/status"
+
+	"github.com/jhump/protoreflect/desc"
+	"github.com/jhump/protoreflect/internal"
+)
+
+// elementNotFoundError is the error returned by reflective operations where the
+// server does not recognize a given file name, symbol name, or extension.
+type elementNotFoundError struct {
+	name    string
+	kind    elementKind
+	symType symbolType // only used when kind == elementKindSymbol
+	tag     int32      // only used when kind == elementKindExtension
+
+	// only errors with a kind of elementKindFile will have a cause, which means
+	// the named file could not be resolved because of a dependency that could
+	// not be found; cause describes the missing dependency
+	cause *elementNotFoundError
+}
+
+type elementKind int
+
+const (
+	elementKindSymbol elementKind = iota
+	elementKindFile
+	elementKindExtension
+)
+
+type symbolType string
+
+const (
+	symbolTypeService = "Service"
+	symbolTypeMessage = "Message"
+	symbolTypeEnum    = "Enum"
+	symbolTypeUnknown = "Symbol"
+)
+
+func symbolNotFound(symbol string, symType symbolType, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: symbol, symType: symType, kind: elementKindSymbol, cause: cause}
+}
+
+func extensionNotFound(extendee string, tag int32, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: extendee, tag: tag, kind: elementKindExtension, cause: cause}
+}
+
+func fileNotFound(file string, cause *elementNotFoundError) error {
+	return &elementNotFoundError{name: file, kind: elementKindFile, cause: cause}
+}
+
+func (e *elementNotFoundError) Error() string {
+	first := true
+	var b bytes.Buffer
+	for ; e != nil; e = e.cause {
+		if first {
+			first = false
+		} else {
+			fmt.Fprint(&b, "\ncaused by: ")
+		}
+		switch e.kind {
+		case elementKindSymbol:
+			fmt.Fprintf(&b, "%s not found: %s", e.symType, e.name)
+		case elementKindExtension:
+			fmt.Fprintf(&b, "Extension not found: tag %d for %s", e.tag, e.name)
+		default:
+			fmt.Fprintf(&b, "File not found: %s", e.name)
+		}
+	}
+	return b.String()
+}
+
+// IsElementNotFoundError determines if the given error indicates that a file
+// name, symbol name, or extension field could not be found by the server.
+func IsElementNotFoundError(err error) bool {
+	_, ok := err.(*elementNotFoundError)
+	return ok
+}
+
+// ProtocolError is an error returned when the server sends a response of the
+// wrong type.
+type ProtocolError struct {
+	missingType reflect.Type
+}
+
+func (p ProtocolError) Error() string {
+	return fmt.Sprintf("Protocol error: response was missing %v", p.missingType)
+}
+
+type extDesc struct {
+	extendedMessageName string
+	extensionNumber     int32
+}
+
+// Client is a client connection to a server for performing reflection calls
+// and resolving remote symbols.
+type Client struct {
+	ctx  context.Context
+	stub rpb.ServerReflectionClient
+
+	connMu sync.Mutex
+	cancel context.CancelFunc
+	stream rpb.ServerReflection_ServerReflectionInfoClient
+
+	cacheMu          sync.RWMutex
+	protosByName     map[string]*dpb.FileDescriptorProto
+	filesByName      map[string]*desc.FileDescriptor
+	filesBySymbol    map[string]*desc.FileDescriptor
+	filesByExtension map[extDesc]*desc.FileDescriptor
+}
+
+// NewClient creates a new Client with the given root context and using the
+// given RPC stub for talking to the server.
+func NewClient(ctx context.Context, stub rpb.ServerReflectionClient) *Client {
+	cr := &Client{
+		ctx:              ctx,
+		stub:             stub,
+		protosByName:     map[string]*dpb.FileDescriptorProto{},
+		filesByName:      map[string]*desc.FileDescriptor{},
+		filesBySymbol:    map[string]*desc.FileDescriptor{},
+		filesByExtension: map[extDesc]*desc.FileDescriptor{},
+	}
+	// don't leak a grpc stream
+	runtime.SetFinalizer(cr, (*Client).Reset)
+	return cr
+}
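+
+// Usage sketch (illustrative only; the address and symbol name are
+// placeholders supplied by the caller):
+//
+//	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
+//	if err != nil {
+//		// handle dial error
+//	}
+//	stub := rpb.NewServerReflectionClient(conn)
+//	client := grpcreflect.NewClient(context.Background(), stub)
+//	fd, err := client.FileContainingSymbol("my.pkg.MyService")
+//	if grpcreflect.IsElementNotFoundError(err) {
+//		// the server does not expose that symbol
+//	}
+//	_ = fd // fd describes the file that declares the symbol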
+
+// FileByFilename asks the server for a file descriptor for the proto file with
+// the given name.
+func (cr *Client) FileByFilename(filename string) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	if fd, ok := cr.filesByName[filename]; ok {
+		cr.cacheMu.RUnlock()
+		return fd, nil
+	}
+	fdp, ok := cr.protosByName[filename]
+	cr.cacheMu.RUnlock()
+	// not there? see if we've downloaded the proto
+	if ok {
+		return cr.descriptorFromProto(fdp)
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+			FileByFilename: filename,
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, filename, "")
+	if isNotFound(err) {
+		// file not found? see if we can look up via alternate name
+		if alternate, ok := internal.StdFileAliases[filename]; ok {
+			req := &rpb.ServerReflectionRequest{
+				MessageRequest: &rpb.ServerReflectionRequest_FileByFilename{
+					FileByFilename: alternate,
+				},
+			}
+			fd, err = cr.getAndCacheFileDescriptors(req, alternate, filename)
+			if isNotFound(err) {
+				err = fileNotFound(filename, nil)
+			}
+		} else {
+			err = fileNotFound(filename, nil)
+		}
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = fileNotFound(filename, e)
+	}
+	return fd, err
+}
+
+// FileContainingSymbol asks the server for a file descriptor for the proto file
+// that declares the given fully-qualified symbol.
+func (cr *Client) FileContainingSymbol(symbol string) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	fd, ok := cr.filesBySymbol[symbol]
+	cr.cacheMu.RUnlock()
+	if ok {
+		return fd, nil
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileContainingSymbol{
+			FileContainingSymbol: symbol,
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	if isNotFound(err) {
+		err = symbolNotFound(symbol, symbolTypeUnknown, nil)
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = symbolNotFound(symbol, symbolTypeUnknown, e)
+	}
+	return fd, err
+}
+
+// FileContainingExtension asks the server for a file descriptor for the proto
+// file that declares an extension with the given number for the given
+// fully-qualified message name.
+func (cr *Client) FileContainingExtension(extendedMessageName string, extensionNumber int32) (*desc.FileDescriptor, error) {
+	// hit the cache first
+	cr.cacheMu.RLock()
+	fd, ok := cr.filesByExtension[extDesc{extendedMessageName, extensionNumber}]
+	cr.cacheMu.RUnlock()
+	if ok {
+		return fd, nil
+	}
+
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_FileContainingExtension{
+			FileContainingExtension: &rpb.ExtensionRequest{
+				ContainingType:  extendedMessageName,
+				ExtensionNumber: extensionNumber,
+			},
+		},
+	}
+	fd, err := cr.getAndCacheFileDescriptors(req, "", "")
+	if isNotFound(err) {
+		err = extensionNotFound(extendedMessageName, extensionNumber, nil)
+	} else if e, ok := err.(*elementNotFoundError); ok {
+		err = extensionNotFound(extendedMessageName, extensionNumber, e)
+	}
+	return fd, err
+}
+
+func (cr *Client) getAndCacheFileDescriptors(req *rpb.ServerReflectionRequest, expectedName, alias string) (*desc.FileDescriptor, error) {
+	resp, err := cr.send(req)
+	if err != nil {
+		return nil, err
+	}
+
+	fdResp := resp.GetFileDescriptorResponse()
+	if fdResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(fdResp).Elem()}
+	}
+
+	// Response can contain the result file descriptor, but also its transitive
+	// deps. Furthermore, protocol states that subsequent requests do not need
+	// to send transitive deps that have been sent in prior responses. So we
+	// need to cache all file descriptors and then return the first one (which
+	// should be the answer). If we're looking for a file by name, we can be
+	// smarter and make sure to grab one by name instead of just grabbing the
+	// first one.
+	var firstFd *dpb.FileDescriptorProto
+	for _, fdBytes := range fdResp.FileDescriptorProto {
+		fd := &dpb.FileDescriptorProto{}
+		if err = proto.Unmarshal(fdBytes, fd); err != nil {
+			return nil, err
+		}
+
+		if expectedName != "" && alias != "" && expectedName != alias && fd.GetName() == expectedName {
+			// we found that the file was aliased, so we need to update the proto to reflect that
+			fd.Name = proto.String(alias)
+		}
+
+		cr.cacheMu.Lock()
+		// see if this file was created and cached concurrently
+		if firstFd == nil {
+			if d, ok := cr.filesByName[fd.GetName()]; ok {
+				cr.cacheMu.Unlock()
+				return d, nil
+			}
+		}
+		// store in cache of raw descriptor protos, but don't overwrite existing protos
+		if existingFd, ok := cr.protosByName[fd.GetName()]; ok {
+			fd = existingFd
+		} else {
+			cr.protosByName[fd.GetName()] = fd
+		}
+		cr.cacheMu.Unlock()
+		if firstFd == nil {
+			firstFd = fd
+		}
+	}
+	if firstFd == nil {
+		return nil, &ProtocolError{reflect.TypeOf(firstFd).Elem()}
+	}
+
+	return cr.descriptorFromProto(firstFd)
+}
+
+func (cr *Client) descriptorFromProto(fd *dpb.FileDescriptorProto) (*desc.FileDescriptor, error) {
+	deps := make([]*desc.FileDescriptor, len(fd.GetDependency()))
+	for i, depName := range fd.GetDependency() {
+		if dep, err := cr.FileByFilename(depName); err != nil {
+			return nil, err
+		} else {
+			deps[i] = dep
+		}
+	}
+	d, err := desc.CreateFileDescriptor(fd, deps...)
+	if err != nil {
+		return nil, err
+	}
+	d = cr.cacheFile(d)
+	return d, nil
+}
+
+func (cr *Client) cacheFile(fd *desc.FileDescriptor) *desc.FileDescriptor {
+	cr.cacheMu.Lock()
+	defer cr.cacheMu.Unlock()
+
+	// cache file descriptor by name, but don't overwrite existing entry
+	// (existing entry could come from concurrent caller)
+	if existingFd, ok := cr.filesByName[fd.GetName()]; ok {
+		return existingFd
+	}
+	cr.filesByName[fd.GetName()] = fd
+
+	// also cache by symbols and extensions
+	for _, m := range fd.GetMessageTypes() {
+		cr.cacheMessageLocked(fd, m)
+	}
+	for _, e := range fd.GetEnumTypes() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		for _, v := range e.GetValues() {
+			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+		}
+	}
+	for _, e := range fd.GetExtensions() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+	}
+	for _, s := range fd.GetServices() {
+		cr.filesBySymbol[s.GetFullyQualifiedName()] = fd
+		for _, m := range s.GetMethods() {
+			cr.filesBySymbol[m.GetFullyQualifiedName()] = fd
+		}
+	}
+
+	return fd
+}
+
+func (cr *Client) cacheMessageLocked(fd *desc.FileDescriptor, md *desc.MessageDescriptor) {
+	cr.filesBySymbol[md.GetFullyQualifiedName()] = fd
+	for _, f := range md.GetFields() {
+		cr.filesBySymbol[f.GetFullyQualifiedName()] = fd
+	}
+	for _, o := range md.GetOneOfs() {
+		cr.filesBySymbol[o.GetFullyQualifiedName()] = fd
+	}
+	for _, e := range md.GetNestedEnumTypes() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		for _, v := range e.GetValues() {
+			cr.filesBySymbol[v.GetFullyQualifiedName()] = fd
+		}
+	}
+	for _, e := range md.GetNestedExtensions() {
+		cr.filesBySymbol[e.GetFullyQualifiedName()] = fd
+		cr.filesByExtension[extDesc{e.GetOwner().GetFullyQualifiedName(), e.GetNumber()}] = fd
+	}
+	for _, m := range md.GetNestedMessageTypes() {
+		cr.cacheMessageLocked(fd, m) // recurse
+	}
+}
+
+// AllExtensionNumbersForType asks the server for all known extension numbers
+// for the given fully-qualified message name.
+func (cr *Client) AllExtensionNumbersForType(extendedMessageName string) ([]int32, error) {
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_AllExtensionNumbersOfType{
+			AllExtensionNumbersOfType: extendedMessageName,
+		},
+	}
+	resp, err := cr.send(req)
+	if err != nil {
+		if isNotFound(err) {
+			return nil, symbolNotFound(extendedMessageName, symbolTypeMessage, nil)
+		}
+		return nil, err
+	}
+
+	extResp := resp.GetAllExtensionNumbersResponse()
+	if extResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(extResp).Elem()}
+	}
+	return extResp.ExtensionNumber, nil
+}
+
+// ListServices asks the server for the fully-qualified names of all exposed
+// services.
+func (cr *Client) ListServices() ([]string, error) {
+	req := &rpb.ServerReflectionRequest{
+		MessageRequest: &rpb.ServerReflectionRequest_ListServices{
+			// proto doesn't indicate any purpose for this value and server impl
+			// doesn't actually use it...
+			ListServices: "*",
+		},
+	}
+	resp, err := cr.send(req)
+	if err != nil {
+		return nil, err
+	}
+
+	listResp := resp.GetListServicesResponse()
+	if listResp == nil {
+		return nil, &ProtocolError{reflect.TypeOf(listResp).Elem()}
+	}
+	serviceNames := make([]string, len(listResp.Service))
+	for i, s := range listResp.Service {
+		serviceNames[i] = s.Name
+	}
+	return serviceNames, nil
+}
+
+func (cr *Client) send(req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	// we allow one immediate retry, in case we have a stale stream
+	// (e.g. closed by server)
+	resp, err := cr.doSend(true, req)
+	if err != nil {
+		return nil, err
+	}
+
+	// convert error response messages into errors
+	errResp := resp.GetErrorResponse()
+	if errResp != nil {
+		return nil, status.Errorf(codes.Code(errResp.ErrorCode), "%s", errResp.ErrorMessage)
+	}
+
+	return resp, nil
+}
+
+func isNotFound(err error) bool {
+	if err == nil {
+		return false
+	}
+	s, ok := status.FromError(err)
+	return ok && s.Code() == codes.NotFound
+}
+
+func (cr *Client) doSend(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	// TODO: Streams are thread-safe, so we shouldn't need to lock. But without locking, we'll need more machinery
+	// (goroutines and channels) to ensure that responses are correctly correlated with their requests and thus
+	// delivered in the correct order.
+	cr.connMu.Lock()
+	defer cr.connMu.Unlock()
+	return cr.doSendLocked(retry, req)
+}
+
+func (cr *Client) doSendLocked(retry bool, req *rpb.ServerReflectionRequest) (*rpb.ServerReflectionResponse, error) {
+	if err := cr.initStreamLocked(); err != nil {
+		return nil, err
+	}
+
+	if err := cr.stream.Send(req); err != nil {
+		if err == io.EOF {
+			// if send returns EOF, must call Recv to get real underlying error
+			_, err = cr.stream.Recv()
+		}
+		cr.resetLocked()
+		if retry {
+			return cr.doSendLocked(false, req)
+		}
+		return nil, err
+	}
+
+	if resp, err := cr.stream.Recv(); err != nil {
+		cr.resetLocked()
+		if retry {
+			return cr.doSendLocked(false, req)
+		}
+		return nil, err
+	} else {
+		return resp, nil
+	}
+}
+
+func (cr *Client) initStreamLocked() error {
+	if cr.stream != nil {
+		return nil
+	}
+	var newCtx context.Context
+	newCtx, cr.cancel = context.WithCancel(cr.ctx)
+	var err error
+	cr.stream, err = cr.stub.ServerReflectionInfo(newCtx)
+	return err
+}
+
+// Reset ensures that any active stream with the server is closed, releasing any
+// resources.
+func (cr *Client) Reset() {
+	cr.connMu.Lock()
+	defer cr.connMu.Unlock()
+	cr.resetLocked()
+}
+
+func (cr *Client) resetLocked() {
+	if cr.stream != nil {
+		cr.stream.CloseSend()
+		for {
+			// drain the stream, this covers io.EOF too
+			if _, err := cr.stream.Recv(); err != nil {
+				break
+			}
+		}
+		cr.stream = nil
+	}
+	if cr.cancel != nil {
+		cr.cancel()
+		cr.cancel = nil
+	}
+}
+
+// ResolveService asks the server to resolve the given fully-qualified service
+// name into a service descriptor.
+func (cr *Client) ResolveService(serviceName string) (*desc.ServiceDescriptor, error) {
+	file, err := cr.FileContainingSymbol(serviceName)
+	if err != nil {
+		return nil, setSymbolType(err, serviceName, symbolTypeService)
+	}
+	d := file.FindSymbol(serviceName)
+	if d == nil {
+		return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+	}
+	if s, ok := d.(*desc.ServiceDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(serviceName, symbolTypeService, nil)
+	}
+}
+
+// ResolveMessage asks the server to resolve the given fully-qualified message
+// name into a message descriptor.
+func (cr *Client) ResolveMessage(messageName string) (*desc.MessageDescriptor, error) {
+	file, err := cr.FileContainingSymbol(messageName)
+	if err != nil {
+		return nil, setSymbolType(err, messageName, symbolTypeMessage)
+	}
+	d := file.FindSymbol(messageName)
+	if d == nil {
+		return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+	}
+	if s, ok := d.(*desc.MessageDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(messageName, symbolTypeMessage, nil)
+	}
+}
+
+// ResolveEnum asks the server to resolve the given fully-qualified enum name
+// into an enum descriptor.
+func (cr *Client) ResolveEnum(enumName string) (*desc.EnumDescriptor, error) {
+	file, err := cr.FileContainingSymbol(enumName)
+	if err != nil {
+		return nil, setSymbolType(err, enumName, symbolTypeEnum)
+	}
+	d := file.FindSymbol(enumName)
+	if d == nil {
+		return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+	}
+	if s, ok := d.(*desc.EnumDescriptor); ok {
+		return s, nil
+	} else {
+		return nil, symbolNotFound(enumName, symbolTypeEnum, nil)
+	}
+}
+
+func setSymbolType(err error, name string, symType symbolType) error {
+	if e, ok := err.(*elementNotFoundError); ok {
+		if e.kind == elementKindSymbol && e.name == name && e.symType == symbolTypeUnknown {
+			e.symType = symType
+		}
+	}
+	return err
+}
+
+// ResolveEnumValues asks the server to resolve the given fully-qualified enum
+// name into a map of names to numbers that represents the enum's values.
+func (cr *Client) ResolveEnumValues(enumName string) (map[string]int32, error) {
+	enumDesc, err := cr.ResolveEnum(enumName)
+	if err != nil {
+		return nil, err
+	}
+	vals := map[string]int32{}
+	for _, valDesc := range enumDesc.GetValues() {
+		vals[valDesc.GetName()] = valDesc.GetNumber()
+	}
+	return vals, nil
+}
+
+// ResolveExtension asks the server to resolve the given extension number and
+// fully-qualified message name into a field descriptor.
+func (cr *Client) ResolveExtension(extendedType string, extensionNumber int32) (*desc.FieldDescriptor, error) {
+	file, err := cr.FileContainingExtension(extendedType, extensionNumber)
+	if err != nil {
+		return nil, err
+	}
+	d := findExtension(extendedType, extensionNumber, fileDescriptorExtensions{file})
+	if d == nil {
+		return nil, extensionNotFound(extendedType, extensionNumber, nil)
+	} else {
+		return d, nil
+	}
+}
+
+func findExtension(extendedType string, extensionNumber int32, scope extensionScope) *desc.FieldDescriptor {
+	// search extensions in this scope
+	for _, ext := range scope.extensions() {
+		if ext.GetNumber() == extensionNumber && ext.GetOwner().GetFullyQualifiedName() == extendedType {
+			return ext
+		}
+	}
+
+	// if not found, search nested scopes
+	for _, nested := range scope.nestedScopes() {
+		ext := findExtension(extendedType, extensionNumber, nested)
+		if ext != nil {
+			return ext
+		}
+	}
+
+	return nil
+}
+
+type extensionScope interface {
+	extensions() []*desc.FieldDescriptor
+	nestedScopes() []extensionScope
+}
+
+// fileDescriptorExtensions implements the extensionScope interface on top of
+// desc.FileDescriptor
+type fileDescriptorExtensions struct {
+	proto *desc.FileDescriptor
+}
+
+func (fde fileDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+	return fde.proto.GetExtensions()
+}
+
+func (fde fileDescriptorExtensions) nestedScopes() []extensionScope {
+	scopes := make([]extensionScope, len(fde.proto.GetMessageTypes()))
+	for i, m := range fde.proto.GetMessageTypes() {
+		scopes[i] = msgDescriptorExtensions{m}
+	}
+	return scopes
+}
+
+// msgDescriptorExtensions implements the extensionScope interface on top of
+// desc.MessageDescriptor
+type msgDescriptorExtensions struct {
+	proto *desc.MessageDescriptor
+}
+
+func (mde msgDescriptorExtensions) extensions() []*desc.FieldDescriptor {
+	return mde.proto.GetNestedExtensions()
+}
+
+func (mde msgDescriptorExtensions) nestedScopes() []extensionScope {
+	scopes := make([]extensionScope, len(mde.proto.GetNestedMessageTypes()))
+	for i, m := range mde.proto.GetNestedMessageTypes() {
+		scopes[i] = msgDescriptorExtensions{m}
+	}
+	return scopes
+}
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
new file mode 100644
index 0000000..ec7bd02
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/doc.go
@@ -0,0 +1,10 @@
+// Package grpcreflect provides GRPC-specific extensions to protobuf reflection.
+// This includes a way to access rich service descriptors for all services that
+// a GRPC server exports.
+//
+// Also included is an easy-to-use client for the GRPC reflection service
+// (https://goo.gl/2ILAHf). This client makes it easy to ask a server (that
+// supports the reflection service) for metadata on its exported services, which
+// could be used to construct a dynamic client. (See the grpcdynamic package in
+// this same repo for more on that.)
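+//
+// As a rough sketch (connection setup omitted, the service name below is
+// illustrative, and rpb stands for the generated grpc_reflection_v1alpha
+// stub package), a remote service descriptor can be resolved like this:
+//
+//	client := grpcreflect.NewClient(ctx, rpb.NewServerReflectionClient(conn))
+//	defer client.Reset()
+//	sd, err := client.ResolveService("mypkg.MyService")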
+package grpcreflect
diff --git a/vendor/github.com/jhump/protoreflect/grpcreflect/server.go b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
new file mode 100644
index 0000000..c9ef619
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/grpcreflect/server.go
@@ -0,0 +1,61 @@
+package grpcreflect
+
+import (
+	"fmt"
+
+	"google.golang.org/grpc"
+
+	"github.com/jhump/protoreflect/desc"
+)
+
+// LoadServiceDescriptors loads the service descriptors for all services exposed by the
+// given GRPC server.
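+//
+// A short usage sketch (the registration call and service types are
+// hypothetical generated code):
+//
+//	srv := grpc.NewServer()
+//	pb.RegisterMyServiceServer(srv, &myServiceImpl{})
+//	descs, err := LoadServiceDescriptors(srv)
+//	if err != nil {
+//		// handle the error
+//	}
+//	for name, sd := range descs {
+//		fmt.Printf("%s exposes %d methods\n", name, len(sd.GetMethods()))
+//	}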
+func LoadServiceDescriptors(s *grpc.Server) (map[string]*desc.ServiceDescriptor, error) {
+	descs := map[string]*desc.ServiceDescriptor{}
+	for name, info := range s.GetServiceInfo() {
+		file, ok := info.Metadata.(string)
+		if !ok {
+			return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", name, info.Metadata)
+		}
+		fd, err := desc.LoadFileDescriptor(file)
+		if err != nil {
+			return nil, err
+		}
+		d := fd.FindSymbol(name)
+		if d == nil {
+			return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, name)
+		}
+		sd, ok := d.(*desc.ServiceDescriptor)
+		if !ok {
+			return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, name, d)
+		}
+		descs[name] = sd
+	}
+	return descs, nil
+}
+
+// LoadServiceDescriptor loads a rich descriptor for a given service description
+// generated by protoc-gen-go. Generated code contains an unexported symbol with
+// a name like "_<Service>_serviceDesc" which is the service's description. It
+// is used internally to register a service implementation with a GRPC server.
+// But it can also be used by this package to retrieve the rich descriptor for
+// the service.
+func LoadServiceDescriptor(svc *grpc.ServiceDesc) (*desc.ServiceDescriptor, error) {
+	file, ok := svc.Metadata.(string)
+	if !ok {
+		return nil, fmt.Errorf("service %q has unexpected metadata: expecting a string; got %v", svc.ServiceName, svc.Metadata)
+	}
+	fd, err := desc.LoadFileDescriptor(file)
+	if err != nil {
+		return nil, err
+	}
+	d := fd.FindSymbol(svc.ServiceName)
+	if d == nil {
+		return nil, fmt.Errorf("file descriptor for %q has no element named %q", file, svc.ServiceName)
+	}
+	sd, ok := d.(*desc.ServiceDescriptor)
+	if !ok {
+		return nil, fmt.Errorf("file descriptor for %q has incorrect element named %q: expecting a service descriptor; got %v", file, svc.ServiceName, d)
+	}
+	return sd, nil
+}
diff --git a/vendor/github.com/jhump/protoreflect/internal/standard_files.go b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
new file mode 100644
index 0000000..4a8b47a
--- /dev/null
+++ b/vendor/github.com/jhump/protoreflect/internal/standard_files.go
@@ -0,0 +1,127 @@
+// Package internal contains some code that should not be exported but needs to
+// be shared across more than one of the protoreflect sub-packages.
+package internal
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+	dpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
+)
+
+// TODO: replace this alias configuration with desc.RegisterImportPath?
+
+// StdFileAliases maps the standard proto files included with protoc to the incorrect
+// paths under which older versions of their respective packages registered them.
+var StdFileAliases = map[string]string{
+	// Files for the github.com/golang/protobuf/ptypes package at one point were
+	// registered using the path where the proto files are mirrored in GOPATH,
+	// inside the golang/protobuf repo.
+	// (Fixed as of https://github.com/golang/protobuf/pull/412)
+	"google/protobuf/any.proto":       "github.com/golang/protobuf/ptypes/any/any.proto",
+	"google/protobuf/duration.proto":  "github.com/golang/protobuf/ptypes/duration/duration.proto",
+	"google/protobuf/empty.proto":     "github.com/golang/protobuf/ptypes/empty/empty.proto",
+	"google/protobuf/struct.proto":    "github.com/golang/protobuf/ptypes/struct/struct.proto",
+	"google/protobuf/timestamp.proto": "github.com/golang/protobuf/ptypes/timestamp/timestamp.proto",
+	"google/protobuf/wrappers.proto":  "github.com/golang/protobuf/ptypes/wrappers/wrappers.proto",
+	// Files for the google.golang.org/genproto/protobuf package at one point
+	// were registered with an anomalous "src/" prefix.
+	// (Fixed as of https://github.com/google/go-genproto/pull/31)
+	"google/protobuf/api.proto":            "src/google/protobuf/api.proto",
+	"google/protobuf/field_mask.proto":     "src/google/protobuf/field_mask.proto",
+	"google/protobuf/source_context.proto": "src/google/protobuf/source_context.proto",
+	"google/protobuf/type.proto":           "src/google/protobuf/type.proto",
+
+	// Other standard files (descriptor.proto and compiler/plugin.proto) are
+	// registered correctly, so we don't need rules for them here.
+}
+
+func init() {
+	// We provide aliasing in both directions, to support files with the
+	// proper import path linked against older versions of the generated
+	// files AND files that used the aliased import path but linked against
+	// newer versions of the generated files (which register with the
+	// correct path).
+
+	// Get all files defined above
+	keys := make([]string, 0, len(StdFileAliases))
+	for k := range StdFileAliases {
+		keys = append(keys, k)
+	}
+	// And add inverse mappings
+	for _, k := range keys {
+		alias := StdFileAliases[k]
+		StdFileAliases[alias] = k
+	}
+}
+
+type ErrNoSuchFile string
+
+func (e ErrNoSuchFile) Error() string {
+	return fmt.Sprintf("no such file: %q", string(e))
+}
+
+// LoadFileDescriptor loads a registered descriptor and decodes it. If the given
+// name cannot be loaded but is a known standard name, an alias will be tried,
+// so the standard files can be loaded even if linked against older "known bad"
+// versions of packages.
+func LoadFileDescriptor(file string) (*dpb.FileDescriptorProto, error) {
+	fdb := proto.FileDescriptor(file)
+	aliased := false
+	if fdb == nil {
+		var ok bool
+		alias, ok := StdFileAliases[file]
+		if ok {
+			aliased = true
+			if fdb = proto.FileDescriptor(alias); fdb == nil {
+				return nil, ErrNoSuchFile(file)
+			}
+		} else {
+			return nil, ErrNoSuchFile(file)
+		}
+	}
+
+	fd, err := DecodeFileDescriptor(file, fdb)
+	if err != nil {
+		return nil, err
+	}
+
+	if aliased {
+		// the file descriptor will have the alias used to load it, but
+		// we need it to have the specified name in order to link it
+		fd.Name = proto.String(file)
+	}
+
+	return fd, nil
+}
+
+// DecodeFileDescriptor decodes the bytes of a registered file descriptor.
+// Registered file descriptors are first "proto encoded" (e.g. binary format
+// for the descriptor protos) and then gzipped. So this function gunzips and
+// then unmarshals into a descriptor proto.
+func DecodeFileDescriptor(element string, fdb []byte) (*dpb.FileDescriptorProto, error) {
+	raw, err := decompress(fdb)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decompress %q descriptor: %v", element, err)
+	}
+	fd := dpb.FileDescriptorProto{}
+	if err := proto.Unmarshal(raw, &fd); err != nil {
+		return nil, fmt.Errorf("bad descriptor for %q: %v", element, err)
+	}
+	return &fd, nil
+}
+
+func decompress(b []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(b))
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	out, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("bad gzipped descriptor: %v", err)
+	}
+	return out, nil
+}
diff --git a/vendor/github.com/json-iterator/go/.codecov.yml b/vendor/github.com/json-iterator/go/.codecov.yml
new file mode 100644
index 0000000..955dc0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.codecov.yml
@@ -0,0 +1,3 @@
+ignore:
+    - "output_tests/.*"
+
diff --git a/vendor/github.com/json-iterator/go/.gitignore b/vendor/github.com/json-iterator/go/.gitignore
new file mode 100644
index 0000000..1555653
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+/bug_test.go
+/coverage.txt
+/.idea
diff --git a/vendor/github.com/json-iterator/go/.travis.yml b/vendor/github.com/json-iterator/go/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock
new file mode 100644
index 0000000..c8a9fbb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.lock
@@ -0,0 +1,21 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/modern-go/concurrent"
+  packages = ["."]
+  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+  version = "1.0.0"
+
+[[projects]]
+  name = "github.com/modern-go/reflect2"
+  packages = ["."]
+  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
+  version = "1.0.1"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml
new file mode 100644
index 0000000..313a0f8
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/Gopkg.toml
@@ -0,0 +1,26 @@
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#  name = "github.com/x/y"
+#  version = "2.4.0"
+
+ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"]
+
+[[constraint]]
+  name = "github.com/modern-go/reflect2"
+  version = "1.0.1"
diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE
new file mode 100644
index 0000000..2cf4f5a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2016 json-iterator
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md
new file mode 100644
index 0000000..50d56ff
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/README.md
@@ -0,0 +1,87 @@
+[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go)
+[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go)
+[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go)
+[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go)
+[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE)
+[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
+
+A high-performance, 100% compatible drop-in replacement for "encoding/json".
+
+You can also handle Thrift the same way you handle JSON, using [thrift-iterator](https://github.com/thrift-iterator/go)
+
+# Benchmark
+
+![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png)
+
+Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go
+
+Raw Result (easyjson requires static code generation)
+
+| | ns/op | allocation bytes | allocation times |
+| --- | --- | --- | --- |
+| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op |
+| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op |
+| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op |
+| std encode | 2213 ns/op | 712 B/op | 5 allocs/op |
+| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op |
+| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op |
+
+Always benchmark with your own workload.
+The results depend heavily on the input data.
+
+# Usage
+
+100% compatible with the standard library
+
+Replace
+
+```go
+import "encoding/json"
+json.Marshal(&data)
+```
+
+with 
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Marshal(&data)
+```
+
+Replace
+
+```go
+import "encoding/json"
+json.Unmarshal(input, &data)
+```
+
+with
+
+```go
+import "github.com/json-iterator/go"
+
+var json = jsoniter.ConfigCompatibleWithStandardLibrary
+json.Unmarshal(input, &data)
+```
+
+[More documentation](http://jsoniter.com/migrate-from-go-std.html)
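+
+The streaming `Decoder` and `Encoder` adapters mirror the standard library as
+well. A small, self-contained sketch (the `user` type and the input values are
+illustrative):
+
+```go
+import (
+	"os"
+	"strings"
+
+	jsoniter "github.com/json-iterator/go"
+)
+
+type user struct {
+	Name string `json:"name"`
+}
+
+func copyUsers() error {
+	dec := jsoniter.NewDecoder(strings.NewReader(`{"name":"alice"} {"name":"bob"}`))
+	enc := jsoniter.NewEncoder(os.Stdout)
+	for dec.More() {
+		var u user
+		if err := dec.Decode(&u); err != nil {
+			return err
+		}
+		if err := enc.Encode(u); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```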
+
+# How to get
+
+```
+go get github.com/json-iterator/go
+```
+
+# Contributions Welcome!
+
+Contributors
+
+* [thockin](https://github.com/thockin) 
+* [mattn](https://github.com/mattn)
+* [cch123](https://github.com/cch123)
+* [Oleg Shaldybin](https://github.com/olegshaldybin)
+* [Jason Toffaletti](https://github.com/toffaletti)
+
+Report an issue or open a pull request, email taowen@gmail.com, or chat with us on [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)
diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go
new file mode 100644
index 0000000..e674d0f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/adapter.go
@@ -0,0 +1,150 @@
+package jsoniter
+
+import (
+	"bytes"
+	"io"
+)
+
+// RawMessage is the jsoniter counterpart of json.RawMessage, so encoding/json can be replaced with jsoniter
+type RawMessage []byte
+
+// Unmarshal adapts to the encoding/json Unmarshal API
+//
+// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v.
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information
+func Unmarshal(data []byte, v interface{}) error {
+	return ConfigDefault.Unmarshal(data, v)
+}
+
+// UnmarshalFromString is a convenience method that reads from a string instead of a []byte
+func UnmarshalFromString(str string, v interface{}) error {
+	return ConfigDefault.UnmarshalFromString(str, v)
+}
+
+// Get is a quick way to fetch a value from a deeply nested JSON structure
+func Get(data []byte, path ...interface{}) Any {
+	return ConfigDefault.Get(data, path...)
+}
+
+// Marshal adapts to the encoding/json Marshal API
+//
+// Marshal returns the JSON encoding of v.
+// Refer to https://godoc.org/encoding/json#Marshal for more information
+func Marshal(v interface{}) ([]byte, error) {
+	return ConfigDefault.Marshal(v)
+}
+
+// MarshalIndent same as json.MarshalIndent. Prefix is not supported.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	return ConfigDefault.MarshalIndent(v, prefix, indent)
+}
+
+// MarshalToString is a convenience method that returns a string instead of a []byte
+func MarshalToString(v interface{}) (string, error) {
+	return ConfigDefault.MarshalToString(v)
+}
+
+// NewDecoder adapts to the encoding/json NewDecoder API.
+//
+// NewDecoder returns a new decoder that reads from r.
+//
+// Instead of an encoding/json Decoder, a jsoniter Decoder is returned.
+// Refer to https://godoc.org/encoding/json#NewDecoder for more information
+func NewDecoder(reader io.Reader) *Decoder {
+	return ConfigDefault.NewDecoder(reader)
+}
+
+// Decoder reads and decodes JSON values from an input stream.
+// Decoder provides an API identical to the encoding/json Decoder (Token() and UseNumber() are in progress)
+type Decoder struct {
+	iter *Iterator
+}
+
+// Decode decodes the next JSON value from the input and stores it in obj
+func (adapter *Decoder) Decode(obj interface{}) error {
+	if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil {
+		if !adapter.iter.loadMore() {
+			return io.EOF
+		}
+	}
+	adapter.iter.ReadVal(obj)
+	err := adapter.iter.Error
+	if err == io.EOF {
+		return nil
+	}
+	return adapter.iter.Error
+}
+
+// More reports whether there is another element in the current stream
+func (adapter *Decoder) More() bool {
+	iter := adapter.iter
+	if iter.Error != nil {
+		return false
+	}
+	c := iter.nextToken()
+	if c == 0 {
+		return false
+	}
+	iter.unreadByte()
+	return c != ']' && c != '}'
+}
+
+// Buffered returns a reader over the data remaining in the buffer
+func (adapter *Decoder) Buffered() io.Reader {
+	remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail]
+	return bytes.NewReader(remaining)
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (adapter *Decoder) UseNumber() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.UseNumber = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// DisallowUnknownFields causes the Decoder to return an error when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+func (adapter *Decoder) DisallowUnknownFields() {
+	cfg := adapter.iter.cfg.configBeforeFrozen
+	cfg.DisallowUnknownFields = true
+	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
+}
+
+// NewEncoder same as json.NewEncoder
+func NewEncoder(writer io.Writer) *Encoder {
+	return ConfigDefault.NewEncoder(writer)
+}
+
+// Encoder same as json.Encoder
+type Encoder struct {
+	stream *Stream
+}
+
+// Encode writes the JSON encoding of val to the underlying io.Writer
+func (adapter *Encoder) Encode(val interface{}) error {
+	adapter.stream.WriteVal(val)
+	adapter.stream.WriteRaw("\n")
+	adapter.stream.Flush()
+	return adapter.stream.Error
+}
+
+// SetIndent sets the indentation. The prefix is not supported
+func (adapter *Encoder) SetIndent(prefix, indent string) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.IndentionStep = len(indent)
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// SetEscapeHTML controls HTML escaping, which is enabled by default; pass false to disable it
+func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
+	config := adapter.stream.cfg.configBeforeFrozen
+	config.EscapeHTML = escapeHTML
+	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
+}
+
+// Valid reports whether data is a valid JSON encoding.
+func Valid(data []byte) bool {
+	return ConfigDefault.Valid(data)
+}
diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go
new file mode 100644
index 0000000..f6b8aea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any.go
@@ -0,0 +1,325 @@
+package jsoniter
+
+import (
+	"errors"
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"strconv"
+	"unsafe"
+)
+
+// Any is a generic object representation.
+// The lazy JSON implementation holds []byte and parses lazily.
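+//
+// For example (a sketch; the JSON document and path are illustrative):
+//
+//	data := []byte(`{"colors": ["red", "green"]}`)
+//	first := jsoniter.Get(data, "colors", 0).ToString() // "red"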
+type Any interface {
+	LastError() error
+	ValueType() ValueType
+	MustBeValid() Any
+	ToBool() bool
+	ToInt() int
+	ToInt32() int32
+	ToInt64() int64
+	ToUint() uint
+	ToUint32() uint32
+	ToUint64() uint64
+	ToFloat32() float32
+	ToFloat64() float64
+	ToString() string
+	ToVal(val interface{})
+	Get(path ...interface{}) Any
+	Size() int
+	Keys() []string
+	GetInterface() interface{}
+	WriteTo(stream *Stream)
+}
+
+type baseAny struct{}
+
+func (any *baseAny) Get(path ...interface{}) Any {
+	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *baseAny) Size() int {
+	return 0
+}
+
+func (any *baseAny) Keys() []string {
+	return []string{}
+}
+
+func (any *baseAny) ToVal(obj interface{}) {
+	panic("not implemented")
+}
+
+// WrapInt32 turns an int32 into the Any interface
+func WrapInt32(val int32) Any {
+	return &int32Any{baseAny{}, val}
+}
+
+// WrapInt64 turns an int64 into the Any interface
+func WrapInt64(val int64) Any {
+	return &int64Any{baseAny{}, val}
+}
+
+// WrapUint32 turns a uint32 into the Any interface
+func WrapUint32(val uint32) Any {
+	return &uint32Any{baseAny{}, val}
+}
+
+// WrapUint64 turns a uint64 into the Any interface
+func WrapUint64(val uint64) Any {
+	return &uint64Any{baseAny{}, val}
+}
+
+// WrapFloat64 turns a float64 into the Any interface
+func WrapFloat64(val float64) Any {
+	return &floatAny{baseAny{}, val}
+}
+
+// WrapString turns a string into the Any interface
+func WrapString(val string) Any {
+	return &stringAny{baseAny{}, val}
+}
+
+// Wrap turns a Go object into the Any interface
+func Wrap(val interface{}) Any {
+	if val == nil {
+		return &nilAny{}
+	}
+	asAny, isAny := val.(Any)
+	if isAny {
+		return asAny
+	}
+	typ := reflect2.TypeOf(val)
+	switch typ.Kind() {
+	case reflect.Slice:
+		return wrapArray(val)
+	case reflect.Struct:
+		return wrapStruct(val)
+	case reflect.Map:
+		return wrapMap(val)
+	case reflect.String:
+		return WrapString(val.(string))
+	case reflect.Int:
+		if strconv.IntSize == 32 {
+			return WrapInt32(int32(val.(int)))
+		}
+		return WrapInt64(int64(val.(int)))
+	case reflect.Int8:
+		return WrapInt32(int32(val.(int8)))
+	case reflect.Int16:
+		return WrapInt32(int32(val.(int16)))
+	case reflect.Int32:
+		return WrapInt32(val.(int32))
+	case reflect.Int64:
+		return WrapInt64(val.(int64))
+	case reflect.Uint:
+		if strconv.IntSize == 32 {
+			return WrapUint32(uint32(val.(uint)))
+		}
+		return WrapUint64(uint64(val.(uint)))
+	case reflect.Uintptr:
+		if ptrSize == 32 {
+			return WrapUint32(uint32(val.(uintptr)))
+		}
+		return WrapUint64(uint64(val.(uintptr)))
+	case reflect.Uint8:
+		return WrapUint32(uint32(val.(uint8)))
+	case reflect.Uint16:
+		return WrapUint32(uint32(val.(uint16)))
+	case reflect.Uint32:
+		return WrapUint32(uint32(val.(uint32)))
+	case reflect.Uint64:
+		return WrapUint64(val.(uint64))
+	case reflect.Float32:
+		return WrapFloat64(float64(val.(float32)))
+	case reflect.Float64:
+		return WrapFloat64(val.(float64))
+	case reflect.Bool:
+		if val.(bool) {
+			return &trueAny{}
+		}
+		return &falseAny{}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)}
+}
+
+// ReadAny reads the next JSON element as an Any object. It is a better json.RawMessage.
+func (iter *Iterator) ReadAny() Any {
+	return iter.readAny()
+}
+
+func (iter *Iterator) readAny() Any {
+	c := iter.nextToken()
+	switch c {
+	case '"':
+		iter.unreadByte()
+		return &stringAny{baseAny{}, iter.ReadString()}
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l') // null
+		return &nilAny{}
+	case 't':
+		iter.skipThreeBytes('r', 'u', 'e') // true
+		return &trueAny{}
+	case 'f':
+		iter.skipFourBytes('a', 'l', 's', 'e') // false
+		return &falseAny{}
+	case '{':
+		return iter.readObjectAny()
+	case '[':
+		return iter.readArrayAny()
+	case '-':
+		return iter.readNumberAny(false)
+	case 0:
+		return &invalidAny{baseAny{}, errors.New("input is empty")}
+	default:
+		return iter.readNumberAny(true)
+	}
+}
+
+func (iter *Iterator) readNumberAny(positive bool) Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipNumber()
+	lazyBuf := iter.stopCapture()
+	return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readObjectAny() Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipObject()
+	lazyBuf := iter.stopCapture()
+	return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func (iter *Iterator) readArrayAny() Any {
+	iter.startCapture(iter.head - 1)
+	iter.skipArray()
+	lazyBuf := iter.stopCapture()
+	return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil}
+}
+
+func locateObjectField(iter *Iterator, target string) []byte {
+	var found []byte
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		if field == target {
+			found = iter.SkipAndReturnBytes()
+			return false
+		}
+		iter.Skip()
+		return true
+	})
+	return found
+}
+
+func locateArrayElement(iter *Iterator, target int) []byte {
+	var found []byte
+	n := 0
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		if n == target {
+			found = iter.SkipAndReturnBytes()
+			return false
+		}
+		iter.Skip()
+		n++
+		return true
+	})
+	return found
+}
+
+func locatePath(iter *Iterator, path []interface{}) Any {
+	for i, pathKeyObj := range path {
+		switch pathKey := pathKeyObj.(type) {
+		case string:
+			valueBytes := locateObjectField(iter, pathKey)
+			if valueBytes == nil {
+				return newInvalidAny(path[i:])
+			}
+			iter.ResetBytes(valueBytes)
+		case int:
+			valueBytes := locateArrayElement(iter, pathKey)
+			if valueBytes == nil {
+				return newInvalidAny(path[i:])
+			}
+			iter.ResetBytes(valueBytes)
+		case int32:
+			if '*' == pathKey {
+				return iter.readAny().Get(path[i:]...)
+			}
+			return newInvalidAny(path[i:])
+		default:
+			return newInvalidAny(path[i:])
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		return &invalidAny{baseAny{}, iter.Error}
+	}
+	return iter.readAny()
+}
+
+var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem()
+
+func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ == anyType {
+		return &directAnyCodec{}
+	}
+	if typ.Implements(anyType) {
+		return &anyCodec{
+			valType: typ,
+		}
+	}
+	return nil
+}
+
+func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == anyType {
+		return &directAnyCodec{}
+	}
+	if typ.Implements(anyType) {
+		return &anyCodec{
+			valType: typ,
+		}
+	}
+	return nil
+}
+
+type anyCodec struct {
+	valType reflect2.Type
+}
+
+func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	panic("not implemented")
+}
+
+func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := codec.valType.UnsafeIndirect(ptr)
+	any := obj.(Any)
+	any.WriteTo(stream)
+}
+
+func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	obj := codec.valType.UnsafeIndirect(ptr)
+	any := obj.(Any)
+	return any.Size() == 0
+}
+
+type directAnyCodec struct {
+}
+
+func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*(*Any)(ptr) = iter.readAny()
+}
+
+func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	any := *(*Any)(ptr)
+	if any == nil {
+		stream.WriteNil()
+		return
+	}
+	any.WriteTo(stream)
+}
+
+func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	any := *(*Any)(ptr)
+	return any.Size() == 0
+}
diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go
new file mode 100644
index 0000000..0449e9a
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_array.go
@@ -0,0 +1,278 @@
+package jsoniter
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type arrayLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *arrayLazyAny) ValueType() ValueType {
+	return ArrayValue
+}
+
+func (any *arrayLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *arrayLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *arrayLazyAny) ToBool() bool {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.ReadArray()
+}
+
+func (any *arrayLazyAny) ToInt() int {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToInt32() int32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToInt64() int64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint() uint {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint32() uint32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToUint64() uint64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToFloat32() float32 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToFloat64() float64 {
+	if any.ToBool() {
+		return 1
+	}
+	return 0
+}
+
+func (any *arrayLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *arrayLazyAny) ToVal(val interface{}) {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadVal(val)
+}
+
+func (any *arrayLazyAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int:
+		iter := any.cfg.BorrowIterator(any.buf)
+		defer any.cfg.ReturnIterator(iter)
+		valueBytes := locateArrayElement(iter, firstPath)
+		if valueBytes == nil {
+			return newInvalidAny(path)
+		}
+		iter.ResetBytes(valueBytes)
+		return locatePath(iter, path[1:])
+	case int32:
+		if '*' == firstPath {
+			iter := any.cfg.BorrowIterator(any.buf)
+			defer any.cfg.ReturnIterator(iter)
+			arr := make([]Any, 0)
+			iter.ReadArrayCB(func(iter *Iterator) bool {
+				found := iter.readAny().Get(path[1:]...)
+				if found.ValueType() != InvalidValue {
+					arr = append(arr, found)
+				}
+				return true
+			})
+			return wrapArray(arr)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *arrayLazyAny) Size() int {
+	size := 0
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		size++
+		iter.Skip()
+		return true
+	})
+	return size
+}
+
+func (any *arrayLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *arrayLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
+
+type arrayAny struct {
+	baseAny
+	val reflect.Value
+}
+
+func wrapArray(val interface{}) *arrayAny {
+	return &arrayAny{baseAny{}, reflect.ValueOf(val)}
+}
+
+func (any *arrayAny) ValueType() ValueType {
+	return ArrayValue
+}
+
+func (any *arrayAny) MustBeValid() Any {
+	return any
+}
+
+func (any *arrayAny) LastError() error {
+	return nil
+}
+
+func (any *arrayAny) ToBool() bool {
+	return any.val.Len() != 0
+}
+
+func (any *arrayAny) ToInt() int {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToInt32() int32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToInt64() int64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint() uint {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint32() uint32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToUint64() uint64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToFloat32() float32 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToFloat64() float64 {
+	if any.val.Len() == 0 {
+		return 0
+	}
+	return 1
+}
+
+func (any *arrayAny) ToString() string {
+	str, _ := MarshalToString(any.val.Interface())
+	return str
+}
+
+func (any *arrayAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int:
+		if firstPath < 0 || firstPath >= any.val.Len() {
+			return newInvalidAny(path)
+		}
+		return Wrap(any.val.Index(firstPath).Interface())
+	case int32:
+		if '*' == firstPath {
+			mappedAll := make([]Any, 0)
+			for i := 0; i < any.val.Len(); i++ {
+				mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...)
+				if mapped.ValueType() != InvalidValue {
+					mappedAll = append(mappedAll, mapped)
+				}
+			}
+			return wrapArray(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *arrayAny) Size() int {
+	return any.val.Len()
+}
+
+func (any *arrayAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *arrayAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go
new file mode 100644
index 0000000..9452324
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_bool.go
@@ -0,0 +1,137 @@
+package jsoniter
+
+type trueAny struct {
+	baseAny
+}
+
+func (any *trueAny) LastError() error {
+	return nil
+}
+
+func (any *trueAny) ToBool() bool {
+	return true
+}
+
+func (any *trueAny) ToInt() int {
+	return 1
+}
+
+func (any *trueAny) ToInt32() int32 {
+	return 1
+}
+
+func (any *trueAny) ToInt64() int64 {
+	return 1
+}
+
+func (any *trueAny) ToUint() uint {
+	return 1
+}
+
+func (any *trueAny) ToUint32() uint32 {
+	return 1
+}
+
+func (any *trueAny) ToUint64() uint64 {
+	return 1
+}
+
+func (any *trueAny) ToFloat32() float32 {
+	return 1
+}
+
+func (any *trueAny) ToFloat64() float64 {
+	return 1
+}
+
+func (any *trueAny) ToString() string {
+	return "true"
+}
+
+func (any *trueAny) WriteTo(stream *Stream) {
+	stream.WriteTrue()
+}
+
+func (any *trueAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *trueAny) GetInterface() interface{} {
+	return true
+}
+
+func (any *trueAny) ValueType() ValueType {
+	return BoolValue
+}
+
+func (any *trueAny) MustBeValid() Any {
+	return any
+}
+
+type falseAny struct {
+	baseAny
+}
+
+func (any *falseAny) LastError() error {
+	return nil
+}
+
+func (any *falseAny) ToBool() bool {
+	return false
+}
+
+func (any *falseAny) ToInt() int {
+	return 0
+}
+
+func (any *falseAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *falseAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *falseAny) ToUint() uint {
+	return 0
+}
+
+func (any *falseAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *falseAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *falseAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *falseAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *falseAny) ToString() string {
+	return "false"
+}
+
+func (any *falseAny) WriteTo(stream *Stream) {
+	stream.WriteFalse()
+}
+
+func (any *falseAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *falseAny) GetInterface() interface{} {
+	return false
+}
+
+func (any *falseAny) ValueType() ValueType {
+	return BoolValue
+}
+
+func (any *falseAny) MustBeValid() Any {
+	return any
+}
diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go
new file mode 100644
index 0000000..35fdb09
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_float.go
@@ -0,0 +1,83 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type floatAny struct {
+	baseAny
+	val float64
+}
+
+func (any *floatAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *floatAny) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *floatAny) MustBeValid() Any {
+	return any
+}
+
+func (any *floatAny) LastError() error {
+	return nil
+}
+
+func (any *floatAny) ToBool() bool {
+	return any.ToFloat64() != 0
+}
+
+func (any *floatAny) ToInt() int {
+	return int(any.val)
+}
+
+func (any *floatAny) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *floatAny) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *floatAny) ToUint() uint {
+	if any.val > 0 {
+		return uint(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToUint32() uint32 {
+	if any.val > 0 {
+		return uint32(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToUint64() uint64 {
+	if any.val > 0 {
+		return uint64(any.val)
+	}
+	return 0
+}
+
+func (any *floatAny) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *floatAny) ToFloat64() float64 {
+	return any.val
+}
+
+func (any *floatAny) ToString() string {
+	return strconv.FormatFloat(any.val, 'E', -1, 64)
+}
+
+func (any *floatAny) WriteTo(stream *Stream) {
+	stream.WriteFloat64(any.val)
+}
+
+func (any *floatAny) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go
new file mode 100644
index 0000000..1b56f39
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type int32Any struct {
+	baseAny
+	val int32
+}
+
+func (any *int32Any) LastError() error {
+	return nil
+}
+
+func (any *int32Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *int32Any) MustBeValid() Any {
+	return any
+}
+
+func (any *int32Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *int32Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *int32Any) ToInt32() int32 {
+	return any.val
+}
+
+func (any *int32Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *int32Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *int32Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *int32Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *int32Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *int32Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *int32Any) ToString() string {
+	return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *int32Any) WriteTo(stream *Stream) {
+	stream.WriteInt32(any.val)
+}
+
+func (any *int32Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *int32Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go
new file mode 100644
index 0000000..c440d72
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_int64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type int64Any struct {
+	baseAny
+	val int64
+}
+
+func (any *int64Any) LastError() error {
+	return nil
+}
+
+func (any *int64Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *int64Any) MustBeValid() Any {
+	return any
+}
+
+func (any *int64Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *int64Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *int64Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *int64Any) ToInt64() int64 {
+	return any.val
+}
+
+func (any *int64Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *int64Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *int64Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *int64Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *int64Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *int64Any) ToString() string {
+	return strconv.FormatInt(any.val, 10)
+}
+
+func (any *int64Any) WriteTo(stream *Stream) {
+	stream.WriteInt64(any.val)
+}
+
+func (any *int64Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *int64Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go
new file mode 100644
index 0000000..1d859ea
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_invalid.go
@@ -0,0 +1,82 @@
+package jsoniter
+
+import "fmt"
+
+type invalidAny struct {
+	baseAny
+	err error
+}
+
+func newInvalidAny(path []interface{}) *invalidAny {
+	return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)}
+}
+
+func (any *invalidAny) LastError() error {
+	return any.err
+}
+
+func (any *invalidAny) ValueType() ValueType {
+	return InvalidValue
+}
+
+func (any *invalidAny) MustBeValid() Any {
+	panic(any.err)
+}
+
+func (any *invalidAny) ToBool() bool {
+	return false
+}
+
+func (any *invalidAny) ToInt() int {
+	return 0
+}
+
+func (any *invalidAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *invalidAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *invalidAny) ToUint() uint {
+	return 0
+}
+
+func (any *invalidAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *invalidAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *invalidAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *invalidAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *invalidAny) ToString() string {
+	return ""
+}
+
+func (any *invalidAny) WriteTo(stream *Stream) {
+}
+
+func (any *invalidAny) Get(path ...interface{}) Any {
+	if any.err == nil {
+		return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)}
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)}
+}
+
+func (any *invalidAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *invalidAny) GetInterface() interface{} {
+	return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go
new file mode 100644
index 0000000..d04cb54
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_nil.go
@@ -0,0 +1,69 @@
+package jsoniter
+
+type nilAny struct {
+	baseAny
+}
+
+func (any *nilAny) LastError() error {
+	return nil
+}
+
+func (any *nilAny) ValueType() ValueType {
+	return NilValue
+}
+
+func (any *nilAny) MustBeValid() Any {
+	return any
+}
+
+func (any *nilAny) ToBool() bool {
+	return false
+}
+
+func (any *nilAny) ToInt() int {
+	return 0
+}
+
+func (any *nilAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *nilAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *nilAny) ToUint() uint {
+	return 0
+}
+
+func (any *nilAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *nilAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *nilAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *nilAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *nilAny) ToString() string {
+	return ""
+}
+
+func (any *nilAny) WriteTo(stream *Stream) {
+	stream.WriteNil()
+}
+
+func (any *nilAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *nilAny) GetInterface() interface{} {
+	return nil
+}
diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go
new file mode 100644
index 0000000..9d1e901
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_number.go
@@ -0,0 +1,123 @@
+package jsoniter
+
+import (
+	"io"
+	"unsafe"
+)
+
+type numberLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *numberLazyAny) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *numberLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *numberLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *numberLazyAny) ToBool() bool {
+	return any.ToFloat64() != 0
+}
+
+func (any *numberLazyAny) ToInt() int {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToInt32() int32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToInt64() int64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadInt64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint() uint {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint32() uint32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToUint64() uint64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadUint64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToFloat32() float32 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadFloat32()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToFloat64() float64 {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	val := iter.ReadFloat64()
+	if iter.Error != nil && iter.Error != io.EOF {
+		any.err = iter.Error
+	}
+	return val
+}
+
+func (any *numberLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *numberLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *numberLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go
new file mode 100644
index 0000000..c44ef5c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_object.go
@@ -0,0 +1,374 @@
+package jsoniter
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type objectLazyAny struct {
+	baseAny
+	cfg *frozenConfig
+	buf []byte
+	err error
+}
+
+func (any *objectLazyAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *objectLazyAny) MustBeValid() Any {
+	return any
+}
+
+func (any *objectLazyAny) LastError() error {
+	return any.err
+}
+
+func (any *objectLazyAny) ToBool() bool {
+	return true
+}
+
+func (any *objectLazyAny) ToInt() int {
+	return 0
+}
+
+func (any *objectLazyAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint() uint {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *objectLazyAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *objectLazyAny) ToString() string {
+	return *(*string)(unsafe.Pointer(&any.buf))
+}
+
+func (any *objectLazyAny) ToVal(obj interface{}) {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadVal(obj)
+}
+
+func (any *objectLazyAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case string:
+		iter := any.cfg.BorrowIterator(any.buf)
+		defer any.cfg.ReturnIterator(iter)
+		valueBytes := locateObjectField(iter, firstPath)
+		if valueBytes == nil {
+			return newInvalidAny(path)
+		}
+		iter.ResetBytes(valueBytes)
+		return locatePath(iter, path[1:])
+	case int32:
+		if '*' == firstPath {
+			mappedAll := map[string]Any{}
+			iter := any.cfg.BorrowIterator(any.buf)
+			defer any.cfg.ReturnIterator(iter)
+			iter.ReadMapCB(func(iter *Iterator, field string) bool {
+				mapped := locatePath(iter, path[1:])
+				if mapped.ValueType() != InvalidValue {
+					mappedAll[field] = mapped
+				}
+				return true
+			})
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *objectLazyAny) Keys() []string {
+	keys := []string{}
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadMapCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		keys = append(keys, field)
+		return true
+	})
+	return keys
+}
+
+func (any *objectLazyAny) Size() int {
+	size := 0
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		size++
+		return true
+	})
+	return size
+}
+
+func (any *objectLazyAny) WriteTo(stream *Stream) {
+	stream.Write(any.buf)
+}
+
+func (any *objectLazyAny) GetInterface() interface{} {
+	iter := any.cfg.BorrowIterator(any.buf)
+	defer any.cfg.ReturnIterator(iter)
+	return iter.Read()
+}
+
+type objectAny struct {
+	baseAny
+	err error
+	val reflect.Value
+}
+
+func wrapStruct(val interface{}) *objectAny {
+	return &objectAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *objectAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *objectAny) MustBeValid() Any {
+	return any
+}
+
+func (any *objectAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *objectAny) LastError() error {
+	return any.err
+}
+
+func (any *objectAny) ToBool() bool {
+	return any.val.NumField() != 0
+}
+
+func (any *objectAny) ToInt() int {
+	return 0
+}
+
+func (any *objectAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *objectAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *objectAny) ToUint() uint {
+	return 0
+}
+
+func (any *objectAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *objectAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *objectAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *objectAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *objectAny) ToString() string {
+	str, err := MarshalToString(any.val.Interface())
+	any.err = err
+	return str
+}
+
+func (any *objectAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case string:
+		field := any.val.FieldByName(firstPath)
+		if !field.IsValid() {
+			return newInvalidAny(path)
+		}
+		return Wrap(field.Interface())
+	case int32:
+		if '*' == firstPath {
+			mappedAll := map[string]Any{}
+			for i := 0; i < any.val.NumField(); i++ {
+				field := any.val.Field(i)
+				if field.CanInterface() {
+					mapped := Wrap(field.Interface()).Get(path[1:]...)
+					if mapped.ValueType() != InvalidValue {
+						mappedAll[any.val.Type().Field(i).Name] = mapped
+					}
+				}
+			}
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		return newInvalidAny(path)
+	}
+}
+
+func (any *objectAny) Keys() []string {
+	keys := make([]string, 0, any.val.NumField())
+	for i := 0; i < any.val.NumField(); i++ {
+		keys = append(keys, any.val.Type().Field(i).Name)
+	}
+	return keys
+}
+
+func (any *objectAny) Size() int {
+	return any.val.NumField()
+}
+
+func (any *objectAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *objectAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
+
+type mapAny struct {
+	baseAny
+	err error
+	val reflect.Value
+}
+
+func wrapMap(val interface{}) *mapAny {
+	return &mapAny{baseAny{}, nil, reflect.ValueOf(val)}
+}
+
+func (any *mapAny) ValueType() ValueType {
+	return ObjectValue
+}
+
+func (any *mapAny) MustBeValid() Any {
+	return any
+}
+
+func (any *mapAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *mapAny) LastError() error {
+	return any.err
+}
+
+func (any *mapAny) ToBool() bool {
+	return true
+}
+
+func (any *mapAny) ToInt() int {
+	return 0
+}
+
+func (any *mapAny) ToInt32() int32 {
+	return 0
+}
+
+func (any *mapAny) ToInt64() int64 {
+	return 0
+}
+
+func (any *mapAny) ToUint() uint {
+	return 0
+}
+
+func (any *mapAny) ToUint32() uint32 {
+	return 0
+}
+
+func (any *mapAny) ToUint64() uint64 {
+	return 0
+}
+
+func (any *mapAny) ToFloat32() float32 {
+	return 0
+}
+
+func (any *mapAny) ToFloat64() float64 {
+	return 0
+}
+
+func (any *mapAny) ToString() string {
+	str, err := MarshalToString(any.val.Interface())
+	any.err = err
+	return str
+}
+
+func (any *mapAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	switch firstPath := path[0].(type) {
+	case int32:
+		if '*' == firstPath {
+			mappedAll := map[string]Any{}
+			for _, key := range any.val.MapKeys() {
+				keyAsStr := key.String()
+				element := Wrap(any.val.MapIndex(key).Interface())
+				mapped := element.Get(path[1:]...)
+				if mapped.ValueType() != InvalidValue {
+					mappedAll[keyAsStr] = mapped
+				}
+			}
+			return wrapMap(mappedAll)
+		}
+		return newInvalidAny(path)
+	default:
+		value := any.val.MapIndex(reflect.ValueOf(firstPath))
+		if !value.IsValid() {
+			return newInvalidAny(path)
+		}
+		return Wrap(value.Interface())
+	}
+}
+
+func (any *mapAny) Keys() []string {
+	keys := make([]string, 0, any.val.Len())
+	for _, key := range any.val.MapKeys() {
+		keys = append(keys, key.String())
+	}
+	return keys
+}
+
+func (any *mapAny) Size() int {
+	return any.val.Len()
+}
+
+func (any *mapAny) WriteTo(stream *Stream) {
+	stream.WriteVal(any.val)
+}
+
+func (any *mapAny) GetInterface() interface{} {
+	return any.val.Interface()
+}
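
The object wrappers above support path-based lookup, including a '*' wildcard (passed as a rune, hence the int32 case) that fans the rest of the path out over every field. A minimal usage sketch, not part of this change; the payload and field names are invented for illustration, and the vendored package is assumed to be imported under its canonical path:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Hypothetical payload, just to exercise the path API.
	data := []byte(`{"alice":{"age":30},"bob":{"age":25}}`)

	// A plain string path walks object fields lazily.
	fmt.Println(jsoniter.Get(data, "alice", "age").ToInt()) // 30

	// '*' (a rune, i.e. int32) maps the rest of the path over every field.
	all := jsoniter.Get(data, '*', "age")
	fmt.Println(all.Size()) // 2

	// A missing path yields an invalidAny whose error is reported lazily.
	missing := jsoniter.Get(data, "carol", "age")
	fmt.Println(missing.LastError() != nil) // true
}
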
diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go
new file mode 100644
index 0000000..a4b93c7
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_str.go
@@ -0,0 +1,166 @@
+package jsoniter
+
+import (
+	"fmt"
+	"strconv"
+)
+
+type stringAny struct {
+	baseAny
+	val string
+}
+
+func (any *stringAny) Get(path ...interface{}) Any {
+	if len(path) == 0 {
+		return any
+	}
+	return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)}
+}
+
+func (any *stringAny) Parse() *Iterator {
+	return nil
+}
+
+func (any *stringAny) ValueType() ValueType {
+	return StringValue
+}
+
+func (any *stringAny) MustBeValid() Any {
+	return any
+}
+
+func (any *stringAny) LastError() error {
+	return nil
+}
+
+func (any *stringAny) ToBool() bool {
+	str := any.ToString()
+	if str == "0" {
+		return false
+	}
+	for _, c := range str {
+		switch c {
+		case ' ', '\n', '\r', '\t':
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+func (any *stringAny) ToInt() int {
+	return int(any.ToInt64())
+
+}
+
+func (any *stringAny) ToInt32() int32 {
+	return int32(any.ToInt64())
+}
+
+func (any *stringAny) ToInt64() int64 {
+	if any.val == "" {
+		return 0
+	}
+
+	flag := 1
+	startPos := 0
+	if any.val[0] == '+' || any.val[0] == '-' {
+		startPos = 1
+	}
+
+	if any.val[0] == '-' {
+		flag = -1
+	}
+	endPos := startPos
+
+	for i := startPos; i < len(any.val); i++ {
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			break
+		}
+	}
+	parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64)
+	return int64(flag) * parsed
+}
+
+func (any *stringAny) ToUint() uint {
+	return uint(any.ToUint64())
+}
+
+func (any *stringAny) ToUint32() uint32 {
+	return uint32(any.ToUint64())
+}
+
+func (any *stringAny) ToUint64() uint64 {
+	if any.val == "" {
+		return 0
+	}
+
+	startPos := 0
+
+	if any.val[0] == '-' {
+		return 0
+	}
+	if any.val[0] == '+' {
+		startPos = 1
+	}
+	endPos := startPos
+
+	for i := startPos; i < len(any.val); i++ {
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			break
+		}
+	}
+	parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64)
+	return parsed
+}
+
+func (any *stringAny) ToFloat32() float32 {
+	return float32(any.ToFloat64())
+}
+
+func (any *stringAny) ToFloat64() float64 {
+	if len(any.val) == 0 {
+		return 0
+	}
+
+	// first char invalid
+	if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') {
+		return 0
+	}
+
+	// extract valid num expression from string
+	// eg 123true => 123, -12.12xxa => -12.12
+	endPos := 1
+	for i := 1; i < len(any.val); i++ {
+		if any.val[i] == '.' || any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' {
+			endPos = i + 1
+			continue
+		}
+
+		// end position is the first char which is not digit
+		if any.val[i] >= '0' && any.val[i] <= '9' {
+			endPos = i + 1
+		} else {
+			endPos = i
+			break
+		}
+	}
+	parsed, _ := strconv.ParseFloat(any.val[:endPos], 64)
+	return parsed
+}
+
+func (any *stringAny) ToString() string {
+	return any.val
+}
+
+func (any *stringAny) WriteTo(stream *Stream) {
+	stream.WriteString(any.val)
+}
+
+func (any *stringAny) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go
new file mode 100644
index 0000000..656bbd3
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint32.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type uint32Any struct {
+	baseAny
+	val uint32
+}
+
+func (any *uint32Any) LastError() error {
+	return nil
+}
+
+func (any *uint32Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *uint32Any) MustBeValid() Any {
+	return any
+}
+
+func (any *uint32Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *uint32Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *uint32Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *uint32Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *uint32Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *uint32Any) ToUint32() uint32 {
+	return any.val
+}
+
+func (any *uint32Any) ToUint64() uint64 {
+	return uint64(any.val)
+}
+
+func (any *uint32Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *uint32Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *uint32Any) ToString() string {
+	return strconv.FormatInt(int64(any.val), 10)
+}
+
+func (any *uint32Any) WriteTo(stream *Stream) {
+	stream.WriteUint32(any.val)
+}
+
+func (any *uint32Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *uint32Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go
new file mode 100644
index 0000000..7df2fce
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/any_uint64.go
@@ -0,0 +1,74 @@
+package jsoniter
+
+import (
+	"strconv"
+)
+
+type uint64Any struct {
+	baseAny
+	val uint64
+}
+
+func (any *uint64Any) LastError() error {
+	return nil
+}
+
+func (any *uint64Any) ValueType() ValueType {
+	return NumberValue
+}
+
+func (any *uint64Any) MustBeValid() Any {
+	return any
+}
+
+func (any *uint64Any) ToBool() bool {
+	return any.val != 0
+}
+
+func (any *uint64Any) ToInt() int {
+	return int(any.val)
+}
+
+func (any *uint64Any) ToInt32() int32 {
+	return int32(any.val)
+}
+
+func (any *uint64Any) ToInt64() int64 {
+	return int64(any.val)
+}
+
+func (any *uint64Any) ToUint() uint {
+	return uint(any.val)
+}
+
+func (any *uint64Any) ToUint32() uint32 {
+	return uint32(any.val)
+}
+
+func (any *uint64Any) ToUint64() uint64 {
+	return any.val
+}
+
+func (any *uint64Any) ToFloat32() float32 {
+	return float32(any.val)
+}
+
+func (any *uint64Any) ToFloat64() float64 {
+	return float64(any.val)
+}
+
+func (any *uint64Any) ToString() string {
+	return strconv.FormatUint(any.val, 10)
+}
+
+func (any *uint64Any) WriteTo(stream *Stream) {
+	stream.WriteUint64(any.val)
+}
+
+func (any *uint64Any) Parse() *Iterator {
+	return nil
+}
+
+func (any *uint64Any) GetInterface() interface{} {
+	return any.val
+}
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
new file mode 100755
index 0000000..b45ef68
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/build.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+set -e
+set -x
+
+if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
+    mkdir -p /tmp/build-golang/src/github.com/json-iterator
+    ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
+fi
+export GOPATH=/tmp/build-golang
+go get -u github.com/golang/dep/cmd/dep
+cd /tmp/build-golang/src/github.com/json-iterator/go
+exec $GOPATH/bin/dep ensure -update
diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go
new file mode 100644
index 0000000..8c58fcb
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/config.go
@@ -0,0 +1,375 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"io"
+	"reflect"
+	"sync"
+	"unsafe"
+
+	"github.com/modern-go/concurrent"
+	"github.com/modern-go/reflect2"
+)
+
+// Config customizes how the API should behave.
+// The API is created from a Config by calling Froze.
+type Config struct {
+	IndentionStep                 int
+	MarshalFloatWith6Digits       bool
+	EscapeHTML                    bool
+	SortMapKeys                   bool
+	UseNumber                     bool
+	DisallowUnknownFields         bool
+	TagKey                        string
+	OnlyTaggedField               bool
+	ValidateJsonRawMessage        bool
+	ObjectFieldMustBeSimpleString bool
+	CaseSensitive                 bool
+}
+
+// API is the public interface of this package.
+// Its primary methods are Marshal and Unmarshal.
+type API interface {
+	IteratorPool
+	StreamPool
+	MarshalToString(v interface{}) (string, error)
+	Marshal(v interface{}) ([]byte, error)
+	MarshalIndent(v interface{}, prefix, indent string) ([]byte, error)
+	UnmarshalFromString(str string, v interface{}) error
+	Unmarshal(data []byte, v interface{}) error
+	Get(data []byte, path ...interface{}) Any
+	NewEncoder(writer io.Writer) *Encoder
+	NewDecoder(reader io.Reader) *Decoder
+	Valid(data []byte) bool
+	RegisterExtension(extension Extension)
+	DecoderOf(typ reflect2.Type) ValDecoder
+	EncoderOf(typ reflect2.Type) ValEncoder
+}
+
+// ConfigDefault is the default API.
+var ConfigDefault = Config{
+	EscapeHTML: true,
+}.Froze()
+
+// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
+var ConfigCompatibleWithStandardLibrary = Config{
+	EscapeHTML:             true,
+	SortMapKeys:            true,
+	ValidateJsonRawMessage: true,
+}.Froze()
+
+// ConfigFastest marshals floats with only 6 digits of precision.
+var ConfigFastest = Config{
+	EscapeHTML:                    false,
+	MarshalFloatWith6Digits:       true, // will lose precision
+	ObjectFieldMustBeSimpleString: true, // do not unescape object fields
+}.Froze()
+
+type frozenConfig struct {
+	configBeforeFrozen            Config
+	sortMapKeys                   bool
+	indentionStep                 int
+	objectFieldMustBeSimpleString bool
+	onlyTaggedField               bool
+	disallowUnknownFields         bool
+	decoderCache                  *concurrent.Map
+	encoderCache                  *concurrent.Map
+	encoderExtension              Extension
+	decoderExtension              Extension
+	extraExtensions               []Extension
+	streamPool                    *sync.Pool
+	iteratorPool                  *sync.Pool
+	caseSensitive                 bool
+}
+
+func (cfg *frozenConfig) initCache() {
+	cfg.decoderCache = concurrent.NewMap()
+	cfg.encoderCache = concurrent.NewMap()
+}
+
+func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) {
+	cfg.decoderCache.Store(cacheKey, decoder)
+}
+
+func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) {
+	cfg.encoderCache.Store(cacheKey, encoder)
+}
+
+func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder {
+	decoder, found := cfg.decoderCache.Load(cacheKey)
+	if found {
+		return decoder.(ValDecoder)
+	}
+	return nil
+}
+
+func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder {
+	encoder, found := cfg.encoderCache.Load(cacheKey)
+	if found {
+		return encoder.(ValEncoder)
+	}
+	return nil
+}
+
+var cfgCache = concurrent.NewMap()
+
+func getFrozenConfigFromCache(cfg Config) *frozenConfig {
+	obj, found := cfgCache.Load(cfg)
+	if found {
+		return obj.(*frozenConfig)
+	}
+	return nil
+}
+
+func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) {
+	cfgCache.Store(cfg, frozenConfig)
+}
+
+// Froze forges an API from the Config.
+func (cfg Config) Froze() API {
+	api := &frozenConfig{
+		sortMapKeys:                   cfg.SortMapKeys,
+		indentionStep:                 cfg.IndentionStep,
+		objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
+		onlyTaggedField:               cfg.OnlyTaggedField,
+		disallowUnknownFields:         cfg.DisallowUnknownFields,
+		caseSensitive:                 cfg.CaseSensitive,
+	}
+	api.streamPool = &sync.Pool{
+		New: func() interface{} {
+			return NewStream(api, nil, 512)
+		},
+	}
+	api.iteratorPool = &sync.Pool{
+		New: func() interface{} {
+			return NewIterator(api)
+		},
+	}
+	api.initCache()
+	encoderExtension := EncoderExtension{}
+	decoderExtension := DecoderExtension{}
+	if cfg.MarshalFloatWith6Digits {
+		api.marshalFloatWith6Digits(encoderExtension)
+	}
+	if cfg.EscapeHTML {
+		api.escapeHTML(encoderExtension)
+	}
+	if cfg.UseNumber {
+		api.useNumber(decoderExtension)
+	}
+	if cfg.ValidateJsonRawMessage {
+		api.validateJsonRawMessage(encoderExtension)
+	}
+	api.encoderExtension = encoderExtension
+	api.decoderExtension = decoderExtension
+	api.configBeforeFrozen = cfg
+	return api
+}
+
+func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
+	api := getFrozenConfigFromCache(cfg)
+	if api != nil {
+		return api
+	}
+	api = cfg.Froze().(*frozenConfig)
+	for _, extension := range extraExtensions {
+		api.RegisterExtension(extension)
+	}
+	addFrozenConfigToCache(cfg, api)
+	return api
+}
+
+func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
+	encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
+		rawMessage := *(*json.RawMessage)(ptr)
+		iter := cfg.BorrowIterator([]byte(rawMessage))
+		defer cfg.ReturnIterator(iter)
+		iter.Read()
+		if iter.Error != nil && iter.Error != io.EOF {
+			stream.WriteRaw("null")
+		} else {
+			stream.WriteRaw(string(rawMessage))
+		}
+	}, func(ptr unsafe.Pointer) bool {
+		return len(*((*json.RawMessage)(ptr))) == 0
+	}}
+	extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
+	extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder
+}
+
+func (cfg *frozenConfig) useNumber(extension DecoderExtension) {
+	extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
+		existingValue := *((*interface{})(ptr))
+		if existingValue != nil && reflect.TypeOf(existingValue).Kind() == reflect.Ptr {
+			iter.ReadVal(existingValue)
+			return
+		}
+		if iter.WhatIsNext() == NumberValue {
+			*((*interface{})(ptr)) = json.Number(iter.readNumberAsString())
+		} else {
+			*((*interface{})(ptr)) = iter.Read()
+		}
+	}}
+}
+func (cfg *frozenConfig) getTagKey() string {
+	tagKey := cfg.configBeforeFrozen.TagKey
+	if tagKey == "" {
+		return "json"
+	}
+	return tagKey
+}
+
+func (cfg *frozenConfig) RegisterExtension(extension Extension) {
+	cfg.extraExtensions = append(cfg.extraExtensions, extension)
+	copied := cfg.configBeforeFrozen
+	cfg.configBeforeFrozen = copied
+}
+
+type lossyFloat32Encoder struct {
+}
+
+func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat32Lossy(*((*float32)(ptr)))
+}
+
+func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float32)(ptr)) == 0
+}
+
+type lossyFloat64Encoder struct {
+}
+
+func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat64Lossy(*((*float64)(ptr)))
+}
+
+func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float64)(ptr)) == 0
+}
+
+// marshalFloatWith6Digits keeps 10**(-6) precision
+// for float variables for better performance.
+func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) {
+	// for better performance
+	extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{}
+	extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{}
+}
+
+type htmlEscapedStringEncoder struct {
+}
+
+func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	str := *((*string)(ptr))
+	stream.WriteStringWithHTMLEscaped(str)
+}
+
+func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*string)(ptr)) == ""
+}
+
+func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) {
+	encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{}
+}
+
+func (cfg *frozenConfig) cleanDecoders() {
+	typeDecoders = map[string]ValDecoder{}
+	fieldDecoders = map[string]ValDecoder{}
+	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) cleanEncoders() {
+	typeEncoders = map[string]ValEncoder{}
+	fieldEncoders = map[string]ValEncoder{}
+	*cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig))
+}
+
+func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) {
+	stream := cfg.BorrowStream(nil)
+	defer cfg.ReturnStream(stream)
+	stream.WriteVal(v)
+	if stream.Error != nil {
+		return "", stream.Error
+	}
+	return string(stream.Buffer()), nil
+}
+
+func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) {
+	stream := cfg.BorrowStream(nil)
+	defer cfg.ReturnStream(stream)
+	stream.WriteVal(v)
+	if stream.Error != nil {
+		return nil, stream.Error
+	}
+	result := stream.Buffer()
+	copied := make([]byte, len(result))
+	copy(copied, result)
+	return copied, nil
+}
+
+func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	if prefix != "" {
+		panic("prefix is not supported")
+	}
+	for _, r := range indent {
+		if r != ' ' {
+			panic("indent can only be space")
+		}
+	}
+	newCfg := cfg.configBeforeFrozen
+	newCfg.IndentionStep = len(indent)
+	return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
+}
+
+func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {
+	data := []byte(str)
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.ReadVal(v)
+	c := iter.nextToken()
+	if c == 0 {
+		if iter.Error == io.EOF {
+			return nil
+		}
+		return iter.Error
+	}
+	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+	return iter.Error
+}
+
+func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	return locatePath(iter, path)
+}
+
+func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.ReadVal(v)
+	c := iter.nextToken()
+	if c == 0 {
+		if iter.Error == io.EOF {
+			return nil
+		}
+		return iter.Error
+	}
+	iter.ReportError("Unmarshal", "there are bytes left after unmarshal")
+	return iter.Error
+}
+
+func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder {
+	stream := NewStream(cfg, writer, 512)
+	return &Encoder{stream}
+}
+
+func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
+	iter := Parse(cfg, reader, 512)
+	return &Decoder{iter}
+}
+
+func (cfg *frozenConfig) Valid(data []byte) bool {
+	iter := cfg.BorrowIterator(data)
+	defer cfg.ReturnIterator(iter)
+	iter.Skip()
+	return iter.Error == nil
+}
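
The frozen config is the library's unit of reuse: Froze builds an API value once, and that value caches its encoders and decoders for later calls. A minimal sketch of how a caller would typically hold on to one (the options below simply mirror ConfigCompatibleWithStandardLibrary; the data is invented for illustration):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Freeze a Config once and reuse the resulting API.
	api := jsoniter.Config{
		EscapeHTML:             true,
		SortMapKeys:            true,
		ValidateJsonRawMessage: true,
	}.Froze()

	out, err := api.MarshalToString(map[string]int{"b": 2, "a": 1})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // {"a":1,"b":2} because SortMapKeys is set

	var decoded map[string]int
	if err := api.UnmarshalFromString(out, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded["a"], decoded["b"]) // 1 2
}
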
diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
new file mode 100644
index 0000000..3095662
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md
@@ -0,0 +1,7 @@
+| json type \ dest type | bool | int | uint | float | string |
+| --- | --- | --- | --- | --- | --- |
+| number | positive => true <br/> negative => true <br/> zero => false | 23.2 => 23 <br/> -32.1 => -32 | 12.1 => 12 <br/> -12.1 => 0 | as normal | same as origin |
+| string | empty string => false <br/> string "0" => false <br/> other strings => true | "123.32" => 123 <br/> "-123.4" => -123 <br/> "123.23xxxw" => 123 <br/> "abcde12" => 0 <br/> "-32.1" => -32 | "13.2" => 13 <br/> "-1.1" => 0 | "12.1" => 12.1 <br/> "-12.3" => -12.3 <br/> "12.4xxa" => 12.4 <br/> "+1.1e2" => 110 | same as origin |
+| bool | true => true <br/> false => false | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => 1 <br/> false => 0 | true => "true" <br/> false => "false" |
+| object | true | 0 | 0 | 0 | original json |
+| array | empty array => false <br/> nonempty array => true | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | [] => 0 <br/> [1,2] => 1 | original json |
\ No newline at end of file
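
The conversions in the string row are implemented by stringAny in any_str.go: the longest leading numeric prefix is parsed and any trailing characters are ignored. A small sketch of what that looks like through the Any API (the payload is invented for illustration):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// "8080xyz" is an invented value showing the lenient string conversions.
	port := jsoniter.Get([]byte(`{"port":"8080xyz"}`), "port")
	fmt.Println(port.ToInt())     // 8080: leading digits are parsed, the rest ignored
	fmt.Println(port.ToFloat64()) // 8080
	fmt.Println(port.ToBool())    // true: non-empty and not "0"
}
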
diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go
new file mode 100644
index 0000000..95ae54f
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter.go
@@ -0,0 +1,322 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+)
+
+// ValueType is the type of a JSON element.
+type ValueType int
+
+const (
+	// InvalidValue invalid JSON element
+	InvalidValue ValueType = iota
+	// StringValue JSON element "string"
+	StringValue
+	// NumberValue JSON element 100 or 0.10
+	NumberValue
+	// NilValue JSON element null
+	NilValue
+	// BoolValue JSON element true or false
+	BoolValue
+	// ArrayValue JSON element []
+	ArrayValue
+	// ObjectValue JSON element {}
+	ObjectValue
+)
+
+var hexDigits []byte
+var valueTypes []ValueType
+
+func init() {
+	hexDigits = make([]byte, 256)
+	for i := 0; i < len(hexDigits); i++ {
+		hexDigits[i] = 255
+	}
+	for i := '0'; i <= '9'; i++ {
+		hexDigits[i] = byte(i - '0')
+	}
+	for i := 'a'; i <= 'f'; i++ {
+		hexDigits[i] = byte((i - 'a') + 10)
+	}
+	for i := 'A'; i <= 'F'; i++ {
+		hexDigits[i] = byte((i - 'A') + 10)
+	}
+	valueTypes = make([]ValueType, 256)
+	for i := 0; i < len(valueTypes); i++ {
+		valueTypes[i] = InvalidValue
+	}
+	valueTypes['"'] = StringValue
+	valueTypes['-'] = NumberValue
+	valueTypes['0'] = NumberValue
+	valueTypes['1'] = NumberValue
+	valueTypes['2'] = NumberValue
+	valueTypes['3'] = NumberValue
+	valueTypes['4'] = NumberValue
+	valueTypes['5'] = NumberValue
+	valueTypes['6'] = NumberValue
+	valueTypes['7'] = NumberValue
+	valueTypes['8'] = NumberValue
+	valueTypes['9'] = NumberValue
+	valueTypes['t'] = BoolValue
+	valueTypes['f'] = BoolValue
+	valueTypes['n'] = NilValue
+	valueTypes['['] = ArrayValue
+	valueTypes['{'] = ObjectValue
+}
+
+// Iterator is an io.Reader-like object with JSON-specific read functions.
+// Errors are not returned as return values; they are stored in the Error field of the iterator instance.
+type Iterator struct {
+	cfg              *frozenConfig
+	reader           io.Reader
+	buf              []byte
+	head             int
+	tail             int
+	captureStartedAt int
+	captured         []byte
+	Error            error
+	Attachment       interface{} // open for customized decoder
+}
+
+// NewIterator creates an empty Iterator instance
+func NewIterator(cfg API) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: nil,
+		buf:    nil,
+		head:   0,
+		tail:   0,
+	}
+}
+
+// Parse creates an Iterator instance from io.Reader
+func Parse(cfg API, reader io.Reader, bufSize int) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: reader,
+		buf:    make([]byte, bufSize),
+		head:   0,
+		tail:   0,
+	}
+}
+
+// ParseBytes creates an Iterator instance from byte array
+func ParseBytes(cfg API, input []byte) *Iterator {
+	return &Iterator{
+		cfg:    cfg.(*frozenConfig),
+		reader: nil,
+		buf:    input,
+		head:   0,
+		tail:   len(input),
+	}
+}
+
+// ParseString creates an Iterator instance from string
+func ParseString(cfg API, input string) *Iterator {
+	return ParseBytes(cfg, []byte(input))
+}
+
+// Pool returns a pool that can provide more iterators with the same configuration.
+func (iter *Iterator) Pool() IteratorPool {
+	return iter.cfg
+}
+
+// Reset reuses the iterator instance by specifying another reader.
+func (iter *Iterator) Reset(reader io.Reader) *Iterator {
+	iter.reader = reader
+	iter.head = 0
+	iter.tail = 0
+	return iter
+}
+
+// ResetBytes reuses the iterator instance by specifying another byte array as input.
+func (iter *Iterator) ResetBytes(input []byte) *Iterator {
+	iter.reader = nil
+	iter.buf = input
+	iter.head = 0
+	iter.tail = len(input)
+	return iter
+}
+
+// WhatIsNext gets the ValueType of the next JSON element without consuming it.
+func (iter *Iterator) WhatIsNext() ValueType {
+	valueType := valueTypes[iter.nextToken()]
+	iter.unreadByte()
+	return valueType
+}
+
+func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool {
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		switch c {
+		case ' ', '\n', '\t', '\r':
+			continue
+		}
+		iter.head = i
+		return false
+	}
+	return true
+}
+
+func (iter *Iterator) isObjectEnd() bool {
+	c := iter.nextToken()
+	if c == ',' {
+		return false
+	}
+	if c == '}' {
+		return true
+	}
+	iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
+	return true
+}
+
+func (iter *Iterator) nextToken() byte {
+	// a variation of skip whitespaces, returning the next non-whitespace token
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case ' ', '\n', '\t', '\r':
+				continue
+			}
+			iter.head = i + 1
+			return c
+		}
+		if !iter.loadMore() {
+			return 0
+		}
+	}
+}
+
+// ReportError records an error in the iterator instance together with the current position.
+func (iter *Iterator) ReportError(operation string, msg string) {
+	if iter.Error != nil {
+		if iter.Error != io.EOF {
+			return
+		}
+	}
+	peekStart := iter.head - 10
+	if peekStart < 0 {
+		peekStart = 0
+	}
+	peekEnd := iter.head + 10
+	if peekEnd > iter.tail {
+		peekEnd = iter.tail
+	}
+	parsing := string(iter.buf[peekStart:peekEnd])
+	contextStart := iter.head - 50
+	if contextStart < 0 {
+		contextStart = 0
+	}
+	contextEnd := iter.head + 50
+	if contextEnd > iter.tail {
+		contextEnd = iter.tail
+	}
+	context := string(iter.buf[contextStart:contextEnd])
+	iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
+		operation, msg, iter.head-peekStart, parsing, context)
+}
+
+// CurrentBuffer gets the current buffer as a string for debugging purposes.
+func (iter *Iterator) CurrentBuffer() string {
+	peekStart := iter.head - 10
+	if peekStart < 0 {
+		peekStart = 0
+	}
+	return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
+		string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
+}
+
+func (iter *Iterator) readByte() (ret byte) {
+	if iter.head == iter.tail {
+		if iter.loadMore() {
+			ret = iter.buf[iter.head]
+			iter.head++
+			return ret
+		}
+		return 0
+	}
+	ret = iter.buf[iter.head]
+	iter.head++
+	return ret
+}
+
+func (iter *Iterator) loadMore() bool {
+	if iter.reader == nil {
+		if iter.Error == nil {
+			iter.head = iter.tail
+			iter.Error = io.EOF
+		}
+		return false
+	}
+	if iter.captured != nil {
+		iter.captured = append(iter.captured,
+			iter.buf[iter.captureStartedAt:iter.tail]...)
+		iter.captureStartedAt = 0
+	}
+	for {
+		n, err := iter.reader.Read(iter.buf)
+		if n == 0 {
+			if err != nil {
+				if iter.Error == nil {
+					iter.Error = err
+				}
+				return false
+			}
+		} else {
+			iter.head = 0
+			iter.tail = n
+			return true
+		}
+	}
+}
+
+func (iter *Iterator) unreadByte() {
+	if iter.Error != nil {
+		return
+	}
+	iter.head--
+	return
+}
+
+// Read reads the next JSON element as a generic interface{}.
+func (iter *Iterator) Read() interface{} {
+	valueType := iter.WhatIsNext()
+	switch valueType {
+	case StringValue:
+		return iter.ReadString()
+	case NumberValue:
+		if iter.cfg.configBeforeFrozen.UseNumber {
+			return json.Number(iter.readNumberAsString())
+		}
+		return iter.ReadFloat64()
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		return nil
+	case BoolValue:
+		return iter.ReadBool()
+	case ArrayValue:
+		arr := []interface{}{}
+		iter.ReadArrayCB(func(iter *Iterator) bool {
+			var elem interface{}
+			iter.ReadVal(&elem)
+			arr = append(arr, elem)
+			return true
+		})
+		return arr
+	case ObjectValue:
+		obj := map[string]interface{}{}
+		iter.ReadMapCB(func(iter *Iterator, field string) bool {
+			var elem interface{}
+			iter.ReadVal(&elem)
+			obj[field] = elem
+			return true
+		})
+		return obj
+	default:
+		iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType))
+		return nil
+	}
+}
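
The Iterator gives pull-style parsing: callers read tokens one by one and check iter.Error afterwards instead of handling a returned error at every step. A short sketch, with a made-up payload and field names, assuming the canonical import path:

package main

import (
	"fmt"
	"io"
	"strings"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Hypothetical document; the field names are only for illustration.
	r := strings.NewReader(`{"id": 7, "name": "device-1", "extra": [1, 2, 3]}`)
	iter := jsoniter.Parse(jsoniter.ConfigDefault, r, 512)

	for field := iter.ReadObject(); field != ""; field = iter.ReadObject() {
		switch field {
		case "id":
			fmt.Println("id =", iter.ReadInt())
		case "name":
			fmt.Println("name =", iter.ReadString())
		default:
			iter.Skip() // discard fields we do not care about
		}
	}
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println("parse error:", iter.Error)
	}
}
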
diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go
new file mode 100644
index 0000000..6188cb4
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_array.go
@@ -0,0 +1,58 @@
+package jsoniter
+
+// ReadArray reads one array element and tells whether the array has more elements to read.
+func (iter *Iterator) ReadArray() (ret bool) {
+	c := iter.nextToken()
+	switch c {
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l')
+		return false // null
+	case '[':
+		c = iter.nextToken()
+		if c != ']' {
+			iter.unreadByte()
+			return true
+		}
+		return false
+	case ']':
+		return false
+	case ',':
+		return true
+	default:
+		iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
+		return
+	}
+}
+
+// ReadArrayCB reads an array by invoking the callback for each element.
+func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
+	c := iter.nextToken()
+	if c == '[' {
+		c = iter.nextToken()
+		if c != ']' {
+			iter.unreadByte()
+			if !callback(iter) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				if !callback(iter) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != ']' {
+				iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
+				return false
+			}
+			return true
+		}
+		return true
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
+	return false
+}
diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go
new file mode 100644
index 0000000..b975463
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_float.go
@@ -0,0 +1,339 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"io"
+	"math/big"
+	"strconv"
+	"strings"
+	"unsafe"
+)
+
+var floatDigits []int8
+
+const invalidCharForNumber = int8(-1)
+const endOfNumber = int8(-2)
+const dotInNumber = int8(-3)
+
+func init() {
+	floatDigits = make([]int8, 256)
+	for i := 0; i < len(floatDigits); i++ {
+		floatDigits[i] = invalidCharForNumber
+	}
+	for i := int8('0'); i <= int8('9'); i++ {
+		floatDigits[i] = i - int8('0')
+	}
+	floatDigits[','] = endOfNumber
+	floatDigits[']'] = endOfNumber
+	floatDigits['}'] = endOfNumber
+	floatDigits[' '] = endOfNumber
+	floatDigits['\t'] = endOfNumber
+	floatDigits['\n'] = endOfNumber
+	floatDigits['.'] = dotInNumber
+}
+
+// ReadBigFloat reads a big.Float.
+func (iter *Iterator) ReadBigFloat() (ret *big.Float) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil
+	}
+	prec := 64
+	if len(str) > prec {
+		prec = len(str)
+	}
+	val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero)
+	if err != nil {
+		iter.Error = err
+		return nil
+	}
+	return val
+}
+
+// ReadBigInt reads a big.Int.
+func (iter *Iterator) ReadBigInt() (ret *big.Int) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return nil
+	}
+	ret = big.NewInt(0)
+	var success bool
+	ret, success = ret.SetString(str, 10)
+	if !success {
+		iter.ReportError("ReadBigInt", "invalid big int")
+		return nil
+	}
+	return ret
+}
+
+// ReadFloat32 reads a float32.
+func (iter *Iterator) ReadFloat32() (ret float32) {
+	c := iter.nextToken()
+	if c == '-' {
+		return -iter.readPositiveFloat32()
+	}
+	iter.unreadByte()
+	return iter.readPositiveFloat32()
+}
+
+func (iter *Iterator) readPositiveFloat32() (ret float32) {
+	i := iter.head
+	// first char
+	if i == iter.tail {
+		return iter.readFloat32SlowPath()
+	}
+	c := iter.buf[i]
+	i++
+	ind := floatDigits[c]
+	switch ind {
+	case invalidCharForNumber:
+		return iter.readFloat32SlowPath()
+	case endOfNumber:
+		iter.ReportError("readFloat32", "empty number")
+		return
+	case dotInNumber:
+		iter.ReportError("readFloat32", "leading dot is invalid")
+		return
+	case 0:
+		if i == iter.tail {
+			return iter.readFloat32SlowPath()
+		}
+		c = iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			iter.ReportError("readFloat32", "leading zero is invalid")
+			return
+		}
+	}
+	value := uint64(ind)
+	// chars before dot
+non_decimal_loop:
+	for ; i < iter.tail; i++ {
+		c = iter.buf[i]
+		ind := floatDigits[c]
+		switch ind {
+		case invalidCharForNumber:
+			return iter.readFloat32SlowPath()
+		case endOfNumber:
+			iter.head = i
+			return float32(value)
+		case dotInNumber:
+			break non_decimal_loop
+		}
+		if value > uint64SafeToMultiple10 {
+			return iter.readFloat32SlowPath()
+		}
+		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+	}
+	// chars after dot
+	if c == '.' {
+		i++
+		decimalPlaces := 0
+		if i == iter.tail {
+			return iter.readFloat32SlowPath()
+		}
+		for ; i < iter.tail; i++ {
+			c = iter.buf[i]
+			ind := floatDigits[c]
+			switch ind {
+			case endOfNumber:
+				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+					iter.head = i
+					return float32(float64(value) / float64(pow10[decimalPlaces]))
+				}
+				// too many decimal places
+				return iter.readFloat32SlowPath()
+			case invalidCharForNumber, dotInNumber:
+				return iter.readFloat32SlowPath()
+			}
+			decimalPlaces++
+			if value > uint64SafeToMultiple10 {
+				return iter.readFloat32SlowPath()
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+	}
+	return iter.readFloat32SlowPath()
+}
+
+func (iter *Iterator) readNumberAsString() (ret string) {
+	strBuf := [16]byte{}
+	str := strBuf[0:0]
+load_loop:
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+				str = append(str, c)
+				continue
+			default:
+				iter.head = i
+				break load_loop
+			}
+		}
+		if !iter.loadMore() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	if len(str) == 0 {
+		iter.ReportError("readNumberAsString", "invalid number")
+	}
+	return *(*string)(unsafe.Pointer(&str))
+}
+
+func (iter *Iterator) readFloat32SlowPath() (ret float32) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	errMsg := validateFloat(str)
+	if errMsg != "" {
+		iter.ReportError("readFloat32SlowPath", errMsg)
+		return
+	}
+	val, err := strconv.ParseFloat(str, 32)
+	if err != nil {
+		iter.Error = err
+		return
+	}
+	return float32(val)
+}
+
+// ReadFloat64 reads a float64.
+func (iter *Iterator) ReadFloat64() (ret float64) {
+	c := iter.nextToken()
+	if c == '-' {
+		return -iter.readPositiveFloat64()
+	}
+	iter.unreadByte()
+	return iter.readPositiveFloat64()
+}
+
+func (iter *Iterator) readPositiveFloat64() (ret float64) {
+	i := iter.head
+	// first char
+	if i == iter.tail {
+		return iter.readFloat64SlowPath()
+	}
+	c := iter.buf[i]
+	i++
+	ind := floatDigits[c]
+	switch ind {
+	case invalidCharForNumber:
+		return iter.readFloat64SlowPath()
+	case endOfNumber:
+		iter.ReportError("readFloat64", "empty number")
+		return
+	case dotInNumber:
+		iter.ReportError("readFloat64", "leading dot is invalid")
+		return
+	case 0:
+		if i == iter.tail {
+			return iter.readFloat64SlowPath()
+		}
+		c = iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			iter.ReportError("readFloat64", "leading zero is invalid")
+			return
+		}
+	}
+	value := uint64(ind)
+	// chars before dot
+non_decimal_loop:
+	for ; i < iter.tail; i++ {
+		c = iter.buf[i]
+		ind := floatDigits[c]
+		switch ind {
+		case invalidCharForNumber:
+			return iter.readFloat64SlowPath()
+		case endOfNumber:
+			iter.head = i
+			return float64(value)
+		case dotInNumber:
+			break non_decimal_loop
+		}
+		if value > uint64SafeToMultiple10 {
+			return iter.readFloat64SlowPath()
+		}
+		value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind;
+	}
+	// chars after dot
+	if c == '.' {
+		i++
+		decimalPlaces := 0
+		if i == iter.tail {
+			return iter.readFloat64SlowPath()
+		}
+		for ; i < iter.tail; i++ {
+			c = iter.buf[i]
+			ind := floatDigits[c]
+			switch ind {
+			case endOfNumber:
+				if decimalPlaces > 0 && decimalPlaces < len(pow10) {
+					iter.head = i
+					return float64(value) / float64(pow10[decimalPlaces])
+				}
+				// too many decimal places
+				return iter.readFloat64SlowPath()
+			case invalidCharForNumber, dotInNumber:
+				return iter.readFloat64SlowPath()
+			}
+			decimalPlaces++
+			if value > uint64SafeToMultiple10 {
+				return iter.readFloat64SlowPath()
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+	}
+	return iter.readFloat64SlowPath()
+}
+
+func (iter *Iterator) readFloat64SlowPath() (ret float64) {
+	str := iter.readNumberAsString()
+	if iter.Error != nil && iter.Error != io.EOF {
+		return
+	}
+	errMsg := validateFloat(str)
+	if errMsg != "" {
+		iter.ReportError("readFloat64SlowPath", errMsg)
+		return
+	}
+	val, err := strconv.ParseFloat(str, 64)
+	if err != nil {
+		iter.Error = err
+		return
+	}
+	return val
+}
+
+func validateFloat(str string) string {
+	// strconv.ParseFloat does not reject `1.` or `1.e1`, so validate those here
+	if len(str) == 0 {
+		return "empty number"
+	}
+	if str[0] == '-' {
+		return "-- is not valid"
+	}
+	dotPos := strings.IndexByte(str, '.')
+	if dotPos != -1 {
+		if dotPos == len(str)-1 {
+			return "dot can not be last character"
+		}
+		switch str[dotPos+1] {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		default:
+			return "missing digit after dot"
+		}
+	}
+	return ""
+}
+
+// ReadNumber reads a json.Number.
+func (iter *Iterator) ReadNumber() (ret json.Number) {
+	return json.Number(iter.readNumberAsString())
+}
diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go
new file mode 100644
index 0000000..2142320
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_int.go
@@ -0,0 +1,345 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var intDigits []int8
+
+const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1
+const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1
+
+func init() {
+	intDigits = make([]int8, 256)
+	for i := 0; i < len(intDigits); i++ {
+		intDigits[i] = invalidCharForNumber
+	}
+	for i := int8('0'); i <= int8('9'); i++ {
+		intDigits[i] = i - int8('0')
+	}
+}
+
+// ReadUint reads a uint.
+func (iter *Iterator) ReadUint() uint {
+	if strconv.IntSize == 32 {
+		return uint(iter.ReadUint32())
+	}
+	return uint(iter.ReadUint64())
+}
+
+// ReadInt reads an int.
+func (iter *Iterator) ReadInt() int {
+	if strconv.IntSize == 32 {
+		return int(iter.ReadInt32())
+	}
+	return int(iter.ReadInt64())
+}
+
+// ReadInt8 reads an int8.
+func (iter *Iterator) ReadInt8() (ret int8) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt8+1 {
+			iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int8(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt8 {
+		iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int8(val)
+}
+
+// ReadUint8 reads a uint8.
+func (iter *Iterator) ReadUint8() (ret uint8) {
+	val := iter.readUint32(iter.nextToken())
+	if val > math.MaxUint8 {
+		iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return uint8(val)
+}
+
+// ReadInt16 reads an int16.
+func (iter *Iterator) ReadInt16() (ret int16) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt16+1 {
+			iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int16(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt16 {
+		iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int16(val)
+}
+
+// ReadUint16 reads a uint16.
+func (iter *Iterator) ReadUint16() (ret uint16) {
+	val := iter.readUint32(iter.nextToken())
+	if val > math.MaxUint16 {
+		iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return uint16(val)
+}
+
+// ReadInt32 reads an int32.
+func (iter *Iterator) ReadInt32() (ret int32) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint32(iter.readByte())
+		if val > math.MaxInt32+1 {
+			iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+			return
+		}
+		return -int32(val)
+	}
+	val := iter.readUint32(c)
+	if val > math.MaxInt32 {
+		iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10))
+		return
+	}
+	return int32(val)
+}
+
+// ReadUint32 reads a uint32.
+func (iter *Iterator) ReadUint32() (ret uint32) {
+	return iter.readUint32(iter.nextToken())
+}
+
+func (iter *Iterator) readUint32(c byte) (ret uint32) {
+	ind := intDigits[c]
+	if ind == 0 {
+		iter.assertInteger()
+		return 0 // single zero
+	}
+	if ind == invalidCharForNumber {
+		iter.ReportError("readUint32", "unexpected character: "+string([]byte{c}))
+		return
+	}
+	value := uint32(ind)
+	if iter.tail-iter.head > 10 {
+		i := iter.head
+		ind2 := intDigits[iter.buf[i]]
+		if ind2 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value
+		}
+		i++
+		ind3 := intDigits[iter.buf[i]]
+		if ind3 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10 + uint32(ind2)
+		}
+		//iter.head = i + 1
+		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+		i++
+		ind4 := intDigits[iter.buf[i]]
+		if ind4 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100 + uint32(ind2)*10 + uint32(ind3)
+		}
+		i++
+		ind5 := intDigits[iter.buf[i]]
+		if ind5 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
+		}
+		i++
+		ind6 := intDigits[iter.buf[i]]
+		if ind6 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
+		}
+		i++
+		ind7 := intDigits[iter.buf[i]]
+		if ind7 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
+		}
+		i++
+		ind8 := intDigits[iter.buf[i]]
+		if ind8 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
+		}
+		i++
+		ind9 := intDigits[iter.buf[i]]
+		value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
+		iter.head = i
+		if ind9 == invalidCharForNumber {
+			iter.assertInteger()
+			return value
+		}
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			ind = intDigits[iter.buf[i]]
+			if ind == invalidCharForNumber {
+				iter.head = i
+				iter.assertInteger()
+				return value
+			}
+			if value > uint32SafeToMultiply10 {
+				value2 := (value << 3) + (value << 1) + uint32(ind)
+				if value2 < value {
+					iter.ReportError("readUint32", "overflow")
+					return
+				}
+				value = value2
+				continue
+			}
+			value = (value << 3) + (value << 1) + uint32(ind)
+		}
+		if !iter.loadMore() {
+			iter.assertInteger()
+			return value
+		}
+	}
+}
+
+// ReadInt64 reads an int64.
+func (iter *Iterator) ReadInt64() (ret int64) {
+	c := iter.nextToken()
+	if c == '-' {
+		val := iter.readUint64(iter.readByte())
+		if val > math.MaxInt64+1 {
+			iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+			return
+		}
+		return -int64(val)
+	}
+	val := iter.readUint64(c)
+	if val > math.MaxInt64 {
+		iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10))
+		return
+	}
+	return int64(val)
+}
+
+// ReadUint64 reads a uint64.
+func (iter *Iterator) ReadUint64() uint64 {
+	return iter.readUint64(iter.nextToken())
+}
+
+func (iter *Iterator) readUint64(c byte) (ret uint64) {
+	ind := intDigits[c]
+	if ind == 0 {
+		iter.assertInteger()
+		return 0 // single zero
+	}
+	if ind == invalidCharForNumber {
+		iter.ReportError("readUint64", "unexpected character: "+string([]byte{c}))
+		return
+	}
+	value := uint64(ind)
+	if iter.tail-iter.head > 10 {
+		i := iter.head
+		ind2 := intDigits[iter.buf[i]]
+		if ind2 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value
+		}
+		i++
+		ind3 := intDigits[iter.buf[i]]
+		if ind3 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10 + uint64(ind2)
+		}
+		//iter.head = i + 1
+		//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
+		i++
+		ind4 := intDigits[iter.buf[i]]
+		if ind4 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100 + uint64(ind2)*10 + uint64(ind3)
+		}
+		i++
+		ind5 := intDigits[iter.buf[i]]
+		if ind5 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
+		}
+		i++
+		ind6 := intDigits[iter.buf[i]]
+		if ind6 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
+		}
+		i++
+		ind7 := intDigits[iter.buf[i]]
+		if ind7 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
+		}
+		i++
+		ind8 := intDigits[iter.buf[i]]
+		if ind8 == invalidCharForNumber {
+			iter.head = i
+			iter.assertInteger()
+			return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
+		}
+		i++
+		ind9 := intDigits[iter.buf[i]]
+		value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
+		iter.head = i
+		if ind9 == invalidCharForNumber {
+			iter.assertInteger()
+			return value
+		}
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			ind = intDigits[iter.buf[i]]
+			if ind == invalidCharForNumber {
+				iter.head = i
+				iter.assertInteger()
+				return value
+			}
+			if value > uint64SafeToMultiple10 {
+				value2 := (value << 3) + (value << 1) + uint64(ind)
+				if value2 < value {
+					iter.ReportError("readUint64", "overflow")
+					return
+				}
+				value = value2
+				continue
+			}
+			value = (value << 3) + (value << 1) + uint64(ind)
+		}
+		if !iter.loadMore() {
+			iter.assertInteger()
+			return value
+		}
+	}
+}
+
+func (iter *Iterator) assertInteger() {
+	if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
+		iter.ReportError("assertInteger", "can not decode float as int")
+	}
+}
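
The integer readers above consume digits in unrolled batches and check for overflow before every multiply-by-ten step; ReadInt64 additionally allows the one extra magnitude needed for math.MinInt64. A minimal, hedged usage sketch (ParseString, ConfigDefault and ReadArray are assumed from other files of this vendored package):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	it := jsoniter.ParseString(jsoniter.ConfigDefault, `[-9223372036854775808, 42]`)
    	for it.ReadArray() {
    		// Each element goes through the overflow-checked integer path above;
    		// the first one is exactly math.MinInt64.
    		fmt.Println(it.ReadInt64())
    	}
    	fmt.Println(it.Error) // <nil> on well-formed input
    }
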
diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go
new file mode 100644
index 0000000..1c57576
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_object.go
@@ -0,0 +1,251 @@
+package jsoniter
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ReadObject reads one field from the object.
+// If the object has ended, it returns an empty string.
+// Otherwise, it returns the field name.
+func (iter *Iterator) ReadObject() (ret string) {
+	c := iter.nextToken()
+	switch c {
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l')
+		return "" // null
+	case '{':
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field := iter.ReadString()
+			c = iter.nextToken()
+			if c != ':' {
+				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+			}
+			return field
+		}
+		if c == '}' {
+			return "" // end of object
+		}
+		iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
+		return
+	case ',':
+		field := iter.ReadString()
+		c = iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+		}
+		return field
+	case '}':
+		return "" // end of object
+	default:
+		iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c})))
+		return
+	}
+}
+
+// readFieldHash hashes the next object field name with an FNV-1a style hash;
+// ASCII letters are lower-cased unless the config is case-sensitive.
+func (iter *Iterator) readFieldHash() int64 {
+	hash := int64(0x811c9dc5)
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c}))
+		return 0
+	}
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			// require ascii string and no escape
+			b := iter.buf[i]
+			if b == '\\' {
+				iter.head = i
+				for _, b := range iter.readStringSlowPath() {
+					if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+						b += 'a' - 'A'
+					}
+					hash ^= int64(b)
+					hash *= 0x1000193
+				}
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+					return 0
+				}
+				return hash
+			}
+			if b == '"' {
+				iter.head = i + 1
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
+					return 0
+				}
+				return hash
+			}
+			if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive {
+				b += 'a' - 'A'
+			}
+			hash ^= int64(b)
+			hash *= 0x1000193
+		}
+		if !iter.loadMore() {
+			iter.ReportError("readFieldHash", `incomplete field name`)
+			return 0
+		}
+	}
+}
+
+func calcHash(str string, caseSensitive bool) int64 {
+	if !caseSensitive {
+		str = strings.ToLower(str)
+	}
+	hash := int64(0x811c9dc5)
+	for _, b := range []byte(str) {
+		hash ^= int64(b)
+		hash *= 0x1000193
+	}
+	return int64(hash)
+}
+
+// ReadObjectCB reads an object with a callback; the key is ASCII only and the field name is not copied
+func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
+	c := iter.nextToken()
+	var field string
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field = iter.ReadString()
+			c = iter.nextToken()
+			if c != ':' {
+				iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+			}
+			if !callback(iter, field) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				field = iter.ReadString()
+				c = iter.nextToken()
+				if c != ':' {
+					iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+				}
+				if !callback(iter, field) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != '}' {
+				iter.ReportError("ReadObjectCB", `object not ended with }`)
+				return false
+			}
+			return true
+		}
+		if c == '}' {
+			return true
+		}
+		iter.ReportError("ReadObjectCB", `expect " after {, but found `+string([]byte{c}))
+		return false
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
+	return false
+}
+
+// ReadMapCB reads a map with a callback; the key can be any string
+func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
+	c := iter.nextToken()
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '"' {
+			iter.unreadByte()
+			field := iter.ReadString()
+			if iter.nextToken() != ':' {
+				iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+				return false
+			}
+			if !callback(iter, field) {
+				return false
+			}
+			c = iter.nextToken()
+			for c == ',' {
+				field = iter.ReadString()
+				if iter.nextToken() != ':' {
+					iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+					return false
+				}
+				if !callback(iter, field) {
+					return false
+				}
+				c = iter.nextToken()
+			}
+			if c != '}' {
+				iter.ReportError("ReadMapCB", `object not ended with }`)
+				return false
+			}
+			return true
+		}
+		if c == '}' {
+			return true
+		}
+		iter.ReportError("ReadMapCB", `expect " after {, but found `+string([]byte{c}))
+		return false
+	}
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return true // null
+	}
+	iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+	return false
+}
+
+func (iter *Iterator) readObjectStart() bool {
+	c := iter.nextToken()
+	if c == '{' {
+		c = iter.nextToken()
+		if c == '}' {
+			return false
+		}
+		iter.unreadByte()
+		return true
+	} else if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return false
+	}
+	iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
+	return false
+}
+
+func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
+	str := iter.ReadStringAsSlice()
+	if iter.skipWhitespacesWithoutLoadMore() {
+		if ret == nil {
+			ret = make([]byte, len(str))
+			copy(ret, str)
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+	if iter.buf[iter.head] != ':' {
+		iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
+		return
+	}
+	iter.head++
+	if iter.skipWhitespacesWithoutLoadMore() {
+		if ret == nil {
+			ret = make([]byte, len(str))
+			copy(ret, str)
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+	if ret == nil {
+		return str
+	}
+	return ret
+}
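
ReadObject and ReadObjectCB above give two ways to walk an object without decoding it into a struct. A small hedged sketch of the callback style (the payload and field names are illustrative; ParseBytes and ConfigDefault come from other files of this package):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	data := []byte(`{"name":"olt-1","ports":2}`)
    	it := jsoniter.ParseBytes(jsoniter.ConfigDefault, data)
    	ok := it.ReadObjectCB(func(it *jsoniter.Iterator, field string) bool {
    		// The field name is handed to the callback; the value is still
    		// unread, so the callback decides how to consume it.
    		switch field {
    		case "name":
    			fmt.Println("name =", it.ReadString())
    		default:
    			it.Skip()
    		}
    		return true // keep iterating
    	})
    	fmt.Println(ok, it.Error)
    }
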
diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go
new file mode 100644
index 0000000..f58beb9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip.go
@@ -0,0 +1,129 @@
+package jsoniter
+
+import "fmt"
+
+// ReadNil reads a json value as nil and
+// reports whether it was a null
+func (iter *Iterator) ReadNil() (ret bool) {
+	c := iter.nextToken()
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l') // null
+		return true
+	}
+	iter.unreadByte()
+	return false
+}
+
+// ReadBool reads a json value as a bool
+func (iter *Iterator) ReadBool() (ret bool) {
+	c := iter.nextToken()
+	if c == 't' {
+		iter.skipThreeBytes('r', 'u', 'e')
+		return true
+	}
+	if c == 'f' {
+		iter.skipFourBytes('a', 'l', 's', 'e')
+		return false
+	}
+	iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
+	return
+}
+
+// SkipAndReturnBytes skips the next JSON element and returns its content as []byte.
+// The []byte can be kept; it is a copy of the data.
+func (iter *Iterator) SkipAndReturnBytes() []byte {
+	iter.startCapture(iter.head)
+	iter.Skip()
+	return iter.stopCapture()
+}
+
+type captureBuffer struct {
+	startedAt int
+	captured  []byte
+}
+
+func (iter *Iterator) startCapture(captureStartedAt int) {
+	if iter.captured != nil {
+		panic("already in capture mode")
+	}
+	iter.captureStartedAt = captureStartedAt
+	iter.captured = make([]byte, 0, 32)
+}
+
+func (iter *Iterator) stopCapture() []byte {
+	if iter.captured == nil {
+		panic("not in capture mode")
+	}
+	captured := iter.captured
+	remaining := iter.buf[iter.captureStartedAt:iter.head]
+	iter.captureStartedAt = -1
+	iter.captured = nil
+	if len(captured) == 0 {
+		copied := make([]byte, len(remaining))
+		copy(copied, remaining)
+		return copied
+	}
+	captured = append(captured, remaining...)
+	return captured
+}
+
+// Skip skips a json value and positions the iterator at the next json value
+func (iter *Iterator) Skip() {
+	c := iter.nextToken()
+	switch c {
+	case '"':
+		iter.skipString()
+	case 'n':
+		iter.skipThreeBytes('u', 'l', 'l') // null
+	case 't':
+		iter.skipThreeBytes('r', 'u', 'e') // true
+	case 'f':
+		iter.skipFourBytes('a', 'l', 's', 'e') // false
+	case '0':
+		iter.unreadByte()
+		iter.ReadFloat32()
+	case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		iter.skipNumber()
+	case '[':
+		iter.skipArray()
+	case '{':
+		iter.skipObject()
+	default:
+		iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c))
+		return
+	}
+}
+
+func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) {
+	if iter.readByte() != b1 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b2 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b3 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+	if iter.readByte() != b4 {
+		iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4})))
+		return
+	}
+}
+
+func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) {
+	if iter.readByte() != b1 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+	if iter.readByte() != b2 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+	if iter.readByte() != b3 {
+		iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3})))
+		return
+	}
+}
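
Skip and SkipAndReturnBytes make it cheap to step over values the caller does not care about, or to capture a raw sub-document for later decoding. A minimal hedged sketch (ParseString and ConfigDefault are assumed from other files of this package):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	it := jsoniter.ParseString(jsoniter.ConfigDefault,
    		`{"meta":{"big":"blob"},"id":7}`)
    	for field := it.ReadObject(); field != ""; field = it.ReadObject() {
    		if field == "meta" {
    			raw := it.SkipAndReturnBytes() // a copy, safe to keep
    			fmt.Println("raw meta:", string(raw))
    			continue
    		}
    		fmt.Println(field, "=", it.ReadInt64())
    	}
    }
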
diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
new file mode 100644
index 0000000..8fcdc3b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go
@@ -0,0 +1,144 @@
+//+build jsoniter_sloppy
+
+package jsoniter
+
+// sloppy but faster implementation; does not validate the input json
+
+func (iter *Iterator) skipNumber() {
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			switch c {
+			case ' ', '\n', '\r', '\t', ',', '}', ']':
+				iter.head = i
+				return
+			}
+		}
+		if !iter.loadMore() {
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipArray() {
+	level := 1
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			switch iter.buf[i] {
+			case '"': // If inside string, skip it
+				iter.head = i + 1
+				iter.skipString()
+				i = iter.head - 1 // it will be i++ soon
+			case '[': // If open symbol, increase level
+				level++
+			case ']': // If close symbol, decrease level
+				level--
+
+				// If we have returned to the original level, we're done
+				if level == 0 {
+					iter.head = i + 1
+					return
+				}
+			}
+		}
+		if !iter.loadMore() {
+			iter.ReportError("skipArray", "incomplete array")
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipObject() {
+	level := 1
+	for {
+		for i := iter.head; i < iter.tail; i++ {
+			switch iter.buf[i] {
+			case '"': // If inside string, skip it
+				iter.head = i + 1
+				iter.skipString()
+				i = iter.head - 1 // it will be i++ soon
+			case '{': // If open symbol, increase level
+				level++
+			case '}': // If close symbol, decrease level
+				level--
+
+				// If we have returned to the original level, we're done
+				if level == 0 {
+					iter.head = i + 1
+					return
+				}
+			}
+		}
+		if !iter.loadMore() {
+			iter.ReportError("skipObject", "incomplete object")
+			return
+		}
+	}
+}
+
+func (iter *Iterator) skipString() {
+	for {
+		end, escaped := iter.findStringEnd()
+		if end == -1 {
+			if !iter.loadMore() {
+				iter.ReportError("skipString", "incomplete string")
+				return
+			}
+			if escaped {
+				iter.head = 1 // skip the first char as last char read is \
+			}
+		} else {
+			iter.head = end
+			return
+		}
+	}
+}
+
+// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go
+// Tries to find the end of the string.
+// Supports strings that contain escaped quote symbols.
+func (iter *Iterator) findStringEnd() (int, bool) {
+	escaped := false
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		if c == '"' {
+			if !escaped {
+				return i + 1, false
+			}
+			j := i - 1
+			for {
+				if j < iter.head || iter.buf[j] != '\\' {
+					// even number of backslashes
+					// either end of buffer, or " found
+					return i + 1, true
+				}
+				j--
+				if j < iter.head || iter.buf[j] != '\\' {
+					// odd number of backslashes
+					// it is \" or \\\"
+					break
+				}
+				j--
+			}
+		} else if c == '\\' {
+			escaped = true
+		}
+	}
+	j := iter.tail - 1
+	for {
+		if j < iter.head || iter.buf[j] != '\\' {
+			// even number of backslashes
+			// either end of buffer, or " found
+			return -1, false // do not end with \
+		}
+		j--
+		if j < iter.head || iter.buf[j] != '\\' {
+			// odd number of backslashes
+			// it is \" or \\\"
+			break
+		}
+		j--
+
+	}
+	return -1, true // end with \
+}
diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go
new file mode 100644
index 0000000..6cf66d0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go
@@ -0,0 +1,99 @@
+//+build !jsoniter_sloppy
+
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+)
+
+func (iter *Iterator) skipNumber() {
+	if !iter.trySkipNumber() {
+		iter.unreadByte()
+		if iter.Error != nil && iter.Error != io.EOF {
+			return
+		}
+		iter.ReadFloat64()
+		if iter.Error != nil && iter.Error != io.EOF {
+			iter.Error = nil
+			iter.ReadBigFloat()
+		}
+	}
+}
+
+func (iter *Iterator) trySkipNumber() bool {
+	dotFound := false
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+		case '.':
+			if dotFound {
+				iter.ReportError("validateNumber", `more than one dot found in number`)
+				return true // already failed
+			}
+			if i+1 == iter.tail {
+				return false
+			}
+			c = iter.buf[i+1]
+			switch c {
+			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			default:
+				iter.ReportError("validateNumber", `missing digit after dot`)
+				return true // already failed
+			}
+			dotFound = true
+		default:
+			switch c {
+			case ',', ']', '}', ' ', '\t', '\n', '\r':
+				if iter.head == i {
+					return false // if - without following digits
+				}
+				iter.head = i
+				return true // must be valid
+			}
+			return false // may be invalid
+		}
+	}
+	return false
+}
+
+func (iter *Iterator) skipString() {
+	if !iter.trySkipString() {
+		iter.unreadByte()
+		iter.ReadString()
+	}
+}
+
+func (iter *Iterator) trySkipString() bool {
+	for i := iter.head; i < iter.tail; i++ {
+		c := iter.buf[i]
+		if c == '"' {
+			iter.head = i + 1
+			return true // valid
+		} else if c == '\\' {
+			return false
+		} else if c < ' ' {
+			iter.ReportError("trySkipString",
+				fmt.Sprintf(`invalid control character found: %d`, c))
+			return true // already failed
+		}
+	}
+	return false
+}
+
+func (iter *Iterator) skipObject() {
+	iter.unreadByte()
+	iter.ReadObjectCB(func(iter *Iterator, field string) bool {
+		iter.Skip()
+		return true
+	})
+}
+
+func (iter *Iterator) skipArray() {
+	iter.unreadByte()
+	iter.ReadArrayCB(func(iter *Iterator) bool {
+		iter.Skip()
+		return true
+	})
+}
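
Two skip implementations are provided: the file above is the default (built when the jsoniter_sloppy tag is absent) and re-validates numbers and strings while skipping, whereas iter_skip_sloppy.go trades that validation for speed. A hedged sketch of how the difference can show up (ParseString, ConfigDefault and ReadArray are assumed from the package's other files):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	// "1.2.3" is not a valid JSON number. With the strict skip build
    	// (the default), skipping it reports an error; with
    	//   go build -tags jsoniter_sloppy
    	// the sloppy skipper would step over it silently.
    	it := jsoniter.ParseString(jsoniter.ConfigDefault, `[1.2.3]`)
    	it.ReadArray()
    	it.Skip()
    	fmt.Println(it.Error != nil) // true under the strict build
    }
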
diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go
new file mode 100644
index 0000000..adc487e
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/iter_str.go
@@ -0,0 +1,215 @@
+package jsoniter
+
+import (
+	"fmt"
+	"unicode/utf16"
+)
+
+// ReadString reads a string from the iterator
+func (iter *Iterator) ReadString() (ret string) {
+	c := iter.nextToken()
+	if c == '"' {
+		for i := iter.head; i < iter.tail; i++ {
+			c := iter.buf[i]
+			if c == '"' {
+				ret = string(iter.buf[iter.head:i])
+				iter.head = i + 1
+				return ret
+			} else if c == '\\' {
+				break
+			} else if c < ' ' {
+				iter.ReportError("ReadString",
+					fmt.Sprintf(`invalid control character found: %d`, c))
+				return
+			}
+		}
+		return iter.readStringSlowPath()
+	} else if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return ""
+	}
+	iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
+	return
+}
+
+func (iter *Iterator) readStringSlowPath() (ret string) {
+	var str []byte
+	var c byte
+	for iter.Error == nil {
+		c = iter.readByte()
+		if c == '"' {
+			return string(str)
+		}
+		if c == '\\' {
+			c = iter.readByte()
+			str = iter.readEscapedChar(c, str)
+		} else {
+			str = append(str, c)
+		}
+	}
+	iter.ReportError("readStringSlowPath", "unexpected end of input")
+	return
+}
+
+func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
+	switch c {
+	case 'u':
+		r := iter.readU4()
+		if utf16.IsSurrogate(r) {
+			c = iter.readByte()
+			if iter.Error != nil {
+				return nil
+			}
+			if c != '\\' {
+				iter.unreadByte()
+				str = appendRune(str, r)
+				return str
+			}
+			c = iter.readByte()
+			if iter.Error != nil {
+				return nil
+			}
+			if c != 'u' {
+				str = appendRune(str, r)
+				return iter.readEscapedChar(c, str)
+			}
+			r2 := iter.readU4()
+			if iter.Error != nil {
+				return nil
+			}
+			combined := utf16.DecodeRune(r, r2)
+			if combined == '\uFFFD' {
+				str = appendRune(str, r)
+				str = appendRune(str, r2)
+			} else {
+				str = appendRune(str, combined)
+			}
+		} else {
+			str = appendRune(str, r)
+		}
+	case '"':
+		str = append(str, '"')
+	case '\\':
+		str = append(str, '\\')
+	case '/':
+		str = append(str, '/')
+	case 'b':
+		str = append(str, '\b')
+	case 'f':
+		str = append(str, '\f')
+	case 'n':
+		str = append(str, '\n')
+	case 'r':
+		str = append(str, '\r')
+	case 't':
+		str = append(str, '\t')
+	default:
+		iter.ReportError("readEscapedChar",
+			`invalid escape char after \`)
+		return nil
+	}
+	return str
+}
+
+// ReadStringAsSlice reads a string from the iterator without copying it into string form.
+// The []byte can not be kept, as it will change after the next iterator call.
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
+	c := iter.nextToken()
+	if c == '"' {
+		for i := iter.head; i < iter.tail; i++ {
+			// require ascii string and no escape
+			// for: field name, base64, number
+			if iter.buf[i] == '"' {
+				// fast path: reuse the underlying buffer
+				ret = iter.buf[iter.head:i]
+				iter.head = i + 1
+				return ret
+			}
+		}
+		readLen := iter.tail - iter.head
+		copied := make([]byte, readLen, readLen*2)
+		copy(copied, iter.buf[iter.head:iter.tail])
+		iter.head = iter.tail
+		for iter.Error == nil {
+			c := iter.readByte()
+			if c == '"' {
+				return copied
+			}
+			copied = append(copied, c)
+		}
+		return copied
+	}
+	iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
+	return
+}
+
+func (iter *Iterator) readU4() (ret rune) {
+	for i := 0; i < 4; i++ {
+		c := iter.readByte()
+		if iter.Error != nil {
+			return
+		}
+		if c >= '0' && c <= '9' {
+			ret = ret*16 + rune(c-'0')
+		} else if c >= 'a' && c <= 'f' {
+			ret = ret*16 + rune(c-'a'+10)
+		} else if c >= 'A' && c <= 'F' {
+			ret = ret*16 + rune(c-'A'+10)
+		} else {
+			iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
+			return
+		}
+	}
+	return ret
+}
+
+const (
+	t1 = 0x00 // 0000 0000
+	tx = 0x80 // 1000 0000
+	t2 = 0xC0 // 1100 0000
+	t3 = 0xE0 // 1110 0000
+	t4 = 0xF0 // 1111 0000
+	t5 = 0xF8 // 1111 1000
+
+	maskx = 0x3F // 0011 1111
+	mask2 = 0x1F // 0001 1111
+	mask3 = 0x0F // 0000 1111
+	mask4 = 0x07 // 0000 0111
+
+	rune1Max = 1<<7 - 1
+	rune2Max = 1<<11 - 1
+	rune3Max = 1<<16 - 1
+
+	surrogateMin = 0xD800
+	surrogateMax = 0xDFFF
+
+	maxRune   = '\U0010FFFF' // Maximum valid Unicode code point.
+	runeError = '\uFFFD'     // the "error" Rune or "Unicode replacement character"
+)
+
+func appendRune(p []byte, r rune) []byte {
+	// Negative values are erroneous. Making it unsigned addresses the problem.
+	switch i := uint32(r); {
+	case i <= rune1Max:
+		p = append(p, byte(r))
+		return p
+	case i <= rune2Max:
+		p = append(p, t2|byte(r>>6))
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	case i > maxRune, surrogateMin <= i && i <= surrogateMax:
+		r = runeError
+		fallthrough
+	case i <= rune3Max:
+		p = append(p, t3|byte(r>>12))
+		p = append(p, tx|byte(r>>6)&maskx)
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	default:
+		p = append(p, t4|byte(r>>18))
+		p = append(p, tx|byte(r>>12)&maskx)
+		p = append(p, tx|byte(r>>6)&maskx)
+		p = append(p, tx|byte(r)&maskx)
+		return p
+	}
+}
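
ReadString above takes a fast path for plain ASCII and falls back to readStringSlowPath for escapes, decoding \uXXXX sequences (including UTF-16 surrogate pairs) through readU4 and appendRune. A small illustrative sketch (ParseString, ConfigDefault and ReadArray are assumed from other files of this package):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	// "caf\u00e9" uses an escaped code point, "\ud83d\ude00" a surrogate pair;
    	// both come back as ordinary UTF-8 Go strings.
    	it := jsoniter.ParseString(jsoniter.ConfigDefault,
    		`["caf\u00e9", "\ud83d\ude00"]`)
    	for it.ReadArray() {
    		fmt.Println(it.ReadString())
    	}
    }
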
diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go
new file mode 100644
index 0000000..c2934f9
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/jsoniter.go
@@ -0,0 +1,18 @@
+// Package jsoniter implements encoding and decoding of JSON as defined in
+// RFC 4627 and provides an API whose syntax is identical to the standard library's encoding/json.
+// Converting from encoding/json to jsoniter requires no more than replacing the package
+// and variable type declarations (if any).
+// The jsoniter interfaces give 100% compatibility with code using the standard library.
+//
+// "JSON and Go"
+// (https://golang.org/doc/articles/json_and_go.html)
+// gives a description of how Marshal/Unmarshal operate
+// between arbitrary or predefined json objects and bytes,
+// and it applies to jsoniter.Marshal/Unmarshal as well.
+//
+// In addition, jsoniter.Iterator provides a different set of interfaces
+// for iterating over given bytes/string/reader
+// and yielding parsed elements one by one.
+// This set of interfaces reads input as required and gives
+// better performance.
+package jsoniter
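
As the package comment says, the adapter API is meant to be a drop-in replacement for encoding/json. A minimal hedged sketch (Marshal/Unmarshal and ConfigCompatibleWithStandardLibrary are part of the public API defined in other files of this package; the device type is illustrative only):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    type device struct {
    	ID    string `json:"id"`
    	Ports int    `json:"ports,omitempty"`
    }

    func main() {
    	// The documented idiom: shadow the name "json" with a jsoniter config.
    	var json = jsoniter.ConfigCompatibleWithStandardLibrary

    	out, err := json.Marshal(device{ID: "olt-1", Ports: 4})
    	fmt.Println(string(out), err) // {"id":"olt-1","ports":4} <nil>

    	var d device
    	err = json.Unmarshal([]byte(`{"id":"onu-7"}`), &d)
    	fmt.Println(d, err) // {onu-7 0} <nil>
    }
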
diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go
new file mode 100644
index 0000000..e2389b5
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/pool.go
@@ -0,0 +1,42 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// IteratorPool is a thread-safe pool of iterators with the same configuration
+type IteratorPool interface {
+	BorrowIterator(data []byte) *Iterator
+	ReturnIterator(iter *Iterator)
+}
+
+// StreamPool is a thread-safe pool of streams with the same configuration
+type StreamPool interface {
+	BorrowStream(writer io.Writer) *Stream
+	ReturnStream(stream *Stream)
+}
+
+func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
+	stream := cfg.streamPool.Get().(*Stream)
+	stream.Reset(writer)
+	return stream
+}
+
+func (cfg *frozenConfig) ReturnStream(stream *Stream) {
+	stream.out = nil
+	stream.Error = nil
+	stream.Attachment = nil
+	cfg.streamPool.Put(stream)
+}
+
+func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
+	iter := cfg.iteratorPool.Get().(*Iterator)
+	iter.ResetBytes(data)
+	return iter
+}
+
+func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
+	iter.Error = nil
+	iter.Attachment = nil
+	cfg.iteratorPool.Put(iter)
+}
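
The pools above let callers reuse Iterator and Stream instances across calls instead of allocating new ones. A hedged sketch of the borrow/return pattern (ConfigDefault and the Stream methods used here are defined in other files of this package):

    package main

    import (
    	"bytes"
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	cfg := jsoniter.ConfigDefault

    	// Borrow an iterator, use it, and return it to the pool.
    	it := cfg.BorrowIterator([]byte(`{"ready":true}`))
    	defer cfg.ReturnIterator(it)
    	it.ReadObject()
    	fmt.Println(it.ReadBool())

    	// Same pattern for streams (the write side).
    	var buf bytes.Buffer
    	st := cfg.BorrowStream(&buf)
    	defer cfg.ReturnStream(st)
    	st.WriteVal(map[string]int{"ports": 2})
    	st.Flush()
    	fmt.Println(buf.String())
    }
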
diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go
new file mode 100644
index 0000000..4459e20
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect.go
@@ -0,0 +1,332 @@
+package jsoniter
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
+// ValDecoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValDecoder with json.Decoder.
+// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link).
+//
+// Reflection on the type is used to create decoders, which are then cached.
+// Reflection on values is avoided as much as we can, because a reflect.Value itself will allocate, with the following exceptions:
+// 1. creating an instance of a new value, for example *int will need an int to be allocated
+// 2. appending to a slice, if the existing cap is not enough, allocation will be done using reflect.New
+// 3. assignment to a map, where both key and value will be reflect.Value
+// For a simple struct binding, decoding is reflect.Value free and allocation free
+type ValDecoder interface {
+	Decode(ptr unsafe.Pointer, iter *Iterator)
+}
+
+// ValEncoder is an internal type registered to cache as needed.
+// Don't confuse jsoniter.ValEncoder with json.Encoder.
+// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link).
+type ValEncoder interface {
+	IsEmpty(ptr unsafe.Pointer) bool
+	Encode(ptr unsafe.Pointer, stream *Stream)
+}
+
+type checkIsEmpty interface {
+	IsEmpty(ptr unsafe.Pointer) bool
+}
+
+type ctx struct {
+	*frozenConfig
+	prefix   string
+	encoders map[reflect2.Type]ValEncoder
+	decoders map[reflect2.Type]ValDecoder
+}
+
+func (b *ctx) caseSensitive() bool {
+	if b.frozenConfig == nil {
+		// default is case-insensitive
+		return false
+	}
+	return b.frozenConfig.caseSensitive
+}
+
+func (b *ctx) append(prefix string) *ctx {
+	return &ctx{
+		frozenConfig: b.frozenConfig,
+		prefix:       b.prefix + " " + prefix,
+		encoders:     b.encoders,
+		decoders:     b.decoders,
+	}
+}
+
+// ReadVal copies the underlying JSON into a go value, same as json.Unmarshal
+func (iter *Iterator) ReadVal(obj interface{}) {
+	cacheKey := reflect2.RTypeOf(obj)
+	decoder := iter.cfg.getDecoderFromCache(cacheKey)
+	if decoder == nil {
+		typ := reflect2.TypeOf(obj)
+		if typ.Kind() != reflect.Ptr {
+			iter.ReportError("ReadVal", "can only unmarshal into pointer")
+			return
+		}
+		decoder = iter.cfg.DecoderOf(typ)
+	}
+	ptr := reflect2.PtrOf(obj)
+	if ptr == nil {
+		iter.ReportError("ReadVal", "can not read into nil pointer")
+		return
+	}
+	decoder.Decode(ptr, iter)
+}
+
+// WriteVal copies the go value into the underlying JSON, same as json.Marshal
+func (stream *Stream) WriteVal(val interface{}) {
+	if nil == val {
+		stream.WriteNil()
+		return
+	}
+	cacheKey := reflect2.RTypeOf(val)
+	encoder := stream.cfg.getEncoderFromCache(cacheKey)
+	if encoder == nil {
+		typ := reflect2.TypeOf(val)
+		encoder = stream.cfg.EncoderOf(typ)
+	}
+	encoder.Encode(reflect2.PtrOf(val), stream)
+}
+
+func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder {
+	cacheKey := typ.RType()
+	decoder := cfg.getDecoderFromCache(cacheKey)
+	if decoder != nil {
+		return decoder
+	}
+	ctx := &ctx{
+		frozenConfig: cfg,
+		prefix:       "",
+		decoders:     map[reflect2.Type]ValDecoder{},
+		encoders:     map[reflect2.Type]ValEncoder{},
+	}
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	decoder = decoderOfType(ctx, ptrType.Elem())
+	cfg.addDecoderToCache(cacheKey, decoder)
+	return decoder
+}
+
+func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := getTypeDecoderFromExtension(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfType(ctx, typ)
+	for _, extension := range extensions {
+		decoder = extension.DecorateDecoder(typ, decoder)
+	}
+	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+	for _, extension := range ctx.extraExtensions {
+		decoder = extension.DecorateDecoder(typ, decoder)
+	}
+	return decoder
+}
+
+func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := ctx.decoders[typ]
+	if decoder != nil {
+		return decoder
+	}
+	placeholder := &placeholderDecoder{}
+	ctx.decoders[typ] = placeholder
+	decoder = _createDecoderOfType(ctx, typ)
+	placeholder.decoder = decoder
+	return decoder
+}
+
+func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := createDecoderOfJsonRawMessage(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfJsonNumber(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfMarshaler(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfAny(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	decoder = createDecoderOfNative(ctx, typ)
+	if decoder != nil {
+		return decoder
+	}
+	switch typ.Kind() {
+	case reflect.Interface:
+		ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType)
+		if isIFace {
+			return &ifaceDecoder{valType: ifaceType}
+		}
+		return &efaceDecoder{}
+	case reflect.Struct:
+		return decoderOfStruct(ctx, typ)
+	case reflect.Array:
+		return decoderOfArray(ctx, typ)
+	case reflect.Slice:
+		return decoderOfSlice(ctx, typ)
+	case reflect.Map:
+		return decoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return decoderOfOptional(ctx, typ)
+	default:
+		return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+	}
+}
+
+func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder {
+	cacheKey := typ.RType()
+	encoder := cfg.getEncoderFromCache(cacheKey)
+	if encoder != nil {
+		return encoder
+	}
+	ctx := &ctx{
+		frozenConfig: cfg,
+		prefix:       "",
+		decoders:     map[reflect2.Type]ValDecoder{},
+		encoders:     map[reflect2.Type]ValEncoder{},
+	}
+	encoder = encoderOfType(ctx, typ)
+	if typ.LikePtr() {
+		encoder = &onePtrEncoder{encoder}
+	}
+	cfg.addEncoderToCache(cacheKey, encoder)
+	return encoder
+}
+
+type onePtrEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := getTypeEncoderFromExtension(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfType(ctx, typ)
+	for _, extension := range extensions {
+		encoder = extension.DecorateEncoder(typ, encoder)
+	}
+	encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+	for _, extension := range ctx.extraExtensions {
+		encoder = extension.DecorateEncoder(typ, encoder)
+	}
+	return encoder
+}
+
+func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := ctx.encoders[typ]
+	if encoder != nil {
+		return encoder
+	}
+	placeholder := &placeholderEncoder{}
+	ctx.encoders[typ] = placeholder
+	encoder = _createEncoderOfType(ctx, typ)
+	placeholder.encoder = encoder
+	return encoder
+}
+func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := createEncoderOfJsonRawMessage(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfJsonNumber(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfMarshaler(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfAny(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	encoder = createEncoderOfNative(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	kind := typ.Kind()
+	switch kind {
+	case reflect.Interface:
+		return &dynamicEncoder{typ}
+	case reflect.Struct:
+		return encoderOfStruct(ctx, typ)
+	case reflect.Array:
+		return encoderOfArray(ctx, typ)
+	case reflect.Slice:
+		return encoderOfSlice(ctx, typ)
+	case reflect.Map:
+		return encoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return encoderOfOptional(ctx, typ)
+	default:
+		return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())}
+	}
+}
+
+type lazyErrorDecoder struct {
+	err error
+}
+
+func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.WhatIsNext() != NilValue {
+		if iter.Error == nil {
+			iter.Error = decoder.err
+		}
+	} else {
+		iter.Skip()
+	}
+}
+
+type lazyErrorEncoder struct {
+	err error
+}
+
+func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if ptr == nil {
+		stream.WriteNil()
+	} else if stream.Error == nil {
+		stream.Error = encoder.err
+	}
+}
+
+func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type placeholderDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.decoder.Decode(ptr, iter)
+}
+
+type placeholderEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(ptr, stream)
+}
+
+func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(ptr)
+}
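
ReadVal above insists on a pointer and caches a decoder per concrete type, so repeated decodes of the same type skip the reflection work. A brief hedged sketch of the pointer requirement (ParseString and ConfigDefault are assumed from other files of this package; the port type is illustrative only):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    type port struct {
    	Label string `json:"label"`
    	Speed int    `json:"speed"`
    }

    func main() {
    	it := jsoniter.ParseString(jsoniter.ConfigDefault, `{"label":"nni-1","speed":10000}`)

    	var p port
    	it.ReadVal(&p) // must be a pointer, as enforced above
    	fmt.Printf("%+v %v\n", p, it.Error)

    	// Passing a non-pointer is reported as an error instead of panicking.
    	it2 := jsoniter.ParseString(jsoniter.ConfigDefault, `{}`)
    	it2.ReadVal(p)
    	fmt.Println(it2.Error != nil) // true
    }
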
diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go
new file mode 100644
index 0000000..13a0b7b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_array.go
@@ -0,0 +1,104 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"unsafe"
+)
+
+func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder {
+	arrayType := typ.(*reflect2.UnsafeArrayType)
+	decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+	return &arrayDecoder{arrayType, decoder}
+}
+
+func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder {
+	arrayType := typ.(*reflect2.UnsafeArrayType)
+	if arrayType.Len() == 0 {
+		return emptyArrayEncoder{}
+	}
+	encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem())
+	return &arrayEncoder{arrayType, encoder}
+}
+
+type emptyArrayEncoder struct{}
+
+func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteEmptyArray()
+}
+
+func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return true
+}
+
+type arrayEncoder struct {
+	arrayType   *reflect2.UnsafeArrayType
+	elemEncoder ValEncoder
+}
+
+func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteArrayStart()
+	elemPtr := unsafe.Pointer(ptr)
+	encoder.elemEncoder.Encode(elemPtr, stream)
+	for i := 1; i < encoder.arrayType.Len(); i++ {
+		stream.WriteMore()
+		elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i)
+		encoder.elemEncoder.Encode(elemPtr, stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error())
+	}
+}
+
+func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type arrayDecoder struct {
+	arrayType   *reflect2.UnsafeArrayType
+	elemDecoder ValDecoder
+}
+
+func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error())
+	}
+}
+
+func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	arrayType := decoder.arrayType
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		return
+	}
+	if c != '[' {
+		iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == ']' {
+		return
+	}
+	iter.unreadByte()
+	elemPtr := arrayType.UnsafeGetIndex(ptr, 0)
+	decoder.elemDecoder.Decode(elemPtr, iter)
+	length := 1
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		if length >= arrayType.Len() {
+			iter.Skip()
+			continue
+		}
+		idx := length
+		length += 1
+		elemPtr = arrayType.UnsafeGetIndex(ptr, idx)
+		decoder.elemDecoder.Decode(elemPtr, iter)
+	}
+	if c != ']' {
+		iter.ReportError("decode array", "expect ], but found "+string([]byte{c}))
+		return
+	}
+}
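
The array decoder above fills at most arrayType.Len() elements and silently skips any extra input elements, while missing elements keep their zero values. A short hedged illustration (using the package-level Unmarshal from the adapter, assumed available in this version):

    package main

    import (
    	"fmt"

    	jsoniter "github.com/json-iterator/go"
    )

    func main() {
    	var a [2]int
    	// Extra elements are skipped rather than causing an error.
    	err := jsoniter.Unmarshal([]byte(`[10, 20, 30, 40]`), &a)
    	fmt.Println(a, err) // [10 20] <nil>

    	var b [3]int
    	// Missing elements keep the zero value.
    	err = jsoniter.Unmarshal([]byte(`[7]`), &b)
    	fmt.Println(b, err) // [7 0 0] <nil>
    }
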
diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go
new file mode 100644
index 0000000..8b6bc8b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go
@@ -0,0 +1,70 @@
+package jsoniter
+
+import (
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"unsafe"
+)
+
+type dynamicEncoder struct {
+	valType reflect2.Type
+}
+
+func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	stream.WriteVal(obj)
+}
+
+func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.valType.UnsafeIndirect(ptr) == nil
+}
+
+type efaceDecoder struct {
+}
+
+func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	pObj := (*interface{})(ptr)
+	obj := *pObj
+	if obj == nil {
+		*pObj = iter.Read()
+		return
+	}
+	typ := reflect2.TypeOf(obj)
+	if typ.Kind() != reflect.Ptr {
+		*pObj = iter.Read()
+		return
+	}
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	ptrElemType := ptrType.Elem()
+	if iter.WhatIsNext() == NilValue {
+		if ptrElemType.Kind() != reflect.Ptr {
+			iter.skipFourBytes('n', 'u', 'l', 'l')
+			*pObj = nil
+			return
+		}
+	}
+	if reflect2.IsNil(obj) {
+		obj := ptrElemType.New()
+		iter.ReadVal(obj)
+		*pObj = obj
+		return
+	}
+	iter.ReadVal(obj)
+}
+
+type ifaceDecoder struct {
+	valType *reflect2.UnsafeIFaceType
+}
+
+func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew())
+		return
+	}
+	obj := decoder.valType.UnsafeIndirect(ptr)
+	if reflect2.IsNil(obj) {
+		iter.ReportError("decode non empty interface", "can not unmarshal into nil")
+		return
+	}
+	iter.ReadVal(obj)
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go
new file mode 100644
index 0000000..05e8fbf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_extension.go
@@ -0,0 +1,483 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode"
+	"unsafe"
+)
+
+var typeDecoders = map[string]ValDecoder{}
+var fieldDecoders = map[string]ValDecoder{}
+var typeEncoders = map[string]ValEncoder{}
+var fieldEncoders = map[string]ValEncoder{}
+var extensions = []Extension{}
+
+// StructDescriptor describes how we should encode/decode the struct
+type StructDescriptor struct {
+	Type   reflect2.Type
+	Fields []*Binding
+}
+
+// GetField gets one field from the descriptor by its name.
+// A map can not be used here, in order to keep the field order.
+func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding {
+	for _, binding := range structDescriptor.Fields {
+		if binding.Field.Name() == fieldName {
+			return binding
+		}
+	}
+	return nil
+}
+
+// Binding describes how we should encode/decode the struct field
+type Binding struct {
+	levels    []int
+	Field     reflect2.StructField
+	FromNames []string
+	ToNames   []string
+	Encoder   ValEncoder
+	Decoder   ValDecoder
+}
+
+// Extension is the one-for-all SPI. It customizes encoding/decoding by specifying an alternate encoder/decoder.
+// It can also rename fields via UpdateStructDescriptor.
+type Extension interface {
+	UpdateStructDescriptor(structDescriptor *StructDescriptor)
+	CreateMapKeyDecoder(typ reflect2.Type) ValDecoder
+	CreateMapKeyEncoder(typ reflect2.Type) ValEncoder
+	CreateDecoder(typ reflect2.Type) ValDecoder
+	CreateEncoder(typ reflect2.Type) ValEncoder
+	DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder
+	DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder
+}
+
+// DummyExtension can be embedded to get a no-op implementation of all Extension methods
+type DummyExtension struct {
+}
+
+// UpdateStructDescriptor No-op
+func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// CreateDecoder No-op
+func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateEncoder No-op
+func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+type EncoderExtension map[reflect2.Type]ValEncoder
+
+// UpdateStructDescriptor No-op
+func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateDecoder No-op
+func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateEncoder get encoder from map
+func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return extension[typ]
+}
+
+// CreateMapKeyDecoder No-op
+func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+type DecoderExtension map[reflect2.Type]ValDecoder
+
+// UpdateStructDescriptor No-op
+func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) {
+}
+
+// CreateMapKeyDecoder No-op
+func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder {
+	return nil
+}
+
+// CreateMapKeyEncoder No-op
+func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// CreateDecoder get decoder from map
+func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder {
+	return extension[typ]
+}
+
+// CreateEncoder No-op
+func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder {
+	return nil
+}
+
+// DecorateDecoder No-op
+func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder {
+	return decoder
+}
+
+// DecorateEncoder No-op
+func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder {
+	return encoder
+}
+
+type funcDecoder struct {
+	fun DecoderFunc
+}
+
+func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.fun(ptr, iter)
+}
+
+type funcEncoder struct {
+	fun         EncoderFunc
+	isEmptyFunc func(ptr unsafe.Pointer) bool
+}
+
+func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.fun(ptr, stream)
+}
+
+func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	if encoder.isEmptyFunc == nil {
+		return false
+	}
+	return encoder.isEmptyFunc(ptr)
+}
+
+// DecoderFunc is the function form of TypeDecoder
+type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator)
+
+// EncoderFunc is the function form of TypeEncoder
+type EncoderFunc func(ptr unsafe.Pointer, stream *Stream)
+
+// RegisterTypeDecoderFunc registers a TypeDecoder for a type, given as a function
+func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) {
+	typeDecoders[typ] = &funcDecoder{fun}
+}
+
+// RegisterTypeDecoder registers a TypeDecoder for a type
+func RegisterTypeDecoder(typ string, decoder ValDecoder) {
+	typeDecoders[typ] = decoder
+}
+
+// RegisterFieldDecoderFunc registers a decoder for a struct field, given as a function
+func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) {
+	RegisterFieldDecoder(typ, field, &funcDecoder{fun})
+}
+
+// RegisterFieldDecoder registers a decoder for a struct field
+func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) {
+	fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder
+}
+
+// RegisterTypeEncoderFunc registers a TypeEncoder for a type, given as encode/isEmpty functions
+func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+	typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc}
+}
+
+// RegisterTypeEncoder registers a TypeEncoder for a type
+func RegisterTypeEncoder(typ string, encoder ValEncoder) {
+	typeEncoders[typ] = encoder
+}
+
+// RegisterFieldEncoderFunc registers an encoder for a struct field, given as encode/isEmpty functions
+func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) {
+	RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc})
+}
+
+// RegisterFieldEncoder registers an encoder for a struct field
+func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) {
+	fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder
+}
+
+// RegisterExtension registers an extension
+func RegisterExtension(extension Extension) {
+	extensions = append(extensions, extension)
+}
+
+func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := _getTypeDecoderFromExtension(ctx, typ)
+	if decoder != nil {
+		for _, extension := range extensions {
+			decoder = extension.DecorateDecoder(typ, decoder)
+		}
+		decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
+		for _, extension := range ctx.extraExtensions {
+			decoder = extension.DecorateDecoder(typ, decoder)
+		}
+	}
+	return decoder
+}
+func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
+	for _, extension := range extensions {
+		decoder := extension.CreateDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	decoder := ctx.decoderExtension.CreateDecoder(typ)
+	if decoder != nil {
+		return decoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		decoder := extension.CreateDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	typeName := typ.String()
+	decoder = typeDecoders[typeName]
+	if decoder != nil {
+		return decoder
+	}
+	if typ.Kind() == reflect.Ptr {
+		ptrType := typ.(*reflect2.UnsafePtrType)
+		decoder := typeDecoders[ptrType.Elem().String()]
+		if decoder != nil {
+			return &OptionalDecoder{ptrType.Elem(), decoder}
+		}
+	}
+	return nil
+}
+
+func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := _getTypeEncoderFromExtension(ctx, typ)
+	if encoder != nil {
+		for _, extension := range extensions {
+			encoder = extension.DecorateEncoder(typ, encoder)
+		}
+		encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
+		for _, extension := range ctx.extraExtensions {
+			encoder = extension.DecorateEncoder(typ, encoder)
+		}
+	}
+	return encoder
+}
+
+func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
+	for _, extension := range extensions {
+		encoder := extension.CreateEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	encoder := ctx.encoderExtension.CreateEncoder(typ)
+	if encoder != nil {
+		return encoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		encoder := extension.CreateEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	typeName := typ.String()
+	encoder = typeEncoders[typeName]
+	if encoder != nil {
+		return encoder
+	}
+	if typ.Kind() == reflect.Ptr {
+		typePtr := typ.(*reflect2.UnsafePtrType)
+		encoder := typeEncoders[typePtr.Elem().String()]
+		if encoder != nil {
+			return &OptionalEncoder{encoder}
+		}
+	}
+	return nil
+}
+
+func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor {
+	structType := typ.(*reflect2.UnsafeStructType)
+	embeddedBindings := []*Binding{}
+	bindings := []*Binding{}
+	for i := 0; i < structType.NumField(); i++ {
+		field := structType.Field(i)
+		tag, hastag := field.Tag().Lookup(ctx.getTagKey())
+		if ctx.onlyTaggedField && !hastag && !field.Anonymous() {
+			continue
+		}
+		tagParts := strings.Split(tag, ",")
+		if tag == "-" {
+			continue
+		}
+		if field.Anonymous() && (tag == "" || tagParts[0] == "") {
+			if field.Type().Kind() == reflect.Struct {
+				structDescriptor := describeStruct(ctx, field.Type())
+				for _, binding := range structDescriptor.Fields {
+					binding.levels = append([]int{i}, binding.levels...)
+					omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+					binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+					binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+					embeddedBindings = append(embeddedBindings, binding)
+				}
+				continue
+			} else if field.Type().Kind() == reflect.Ptr {
+				ptrType := field.Type().(*reflect2.UnsafePtrType)
+				if ptrType.Elem().Kind() == reflect.Struct {
+					structDescriptor := describeStruct(ctx, ptrType.Elem())
+					for _, binding := range structDescriptor.Fields {
+						binding.levels = append([]int{i}, binding.levels...)
+						omitempty := binding.Encoder.(*structFieldEncoder).omitempty
+						binding.Encoder = &dereferenceEncoder{binding.Encoder}
+						binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty}
+						binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder}
+						binding.Decoder = &structFieldDecoder{field, binding.Decoder}
+						embeddedBindings = append(embeddedBindings, binding)
+					}
+					continue
+				}
+			}
+		}
+		fieldNames := calcFieldNames(field.Name(), tagParts[0], tag)
+		fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name())
+		decoder := fieldDecoders[fieldCacheKey]
+		if decoder == nil {
+			decoder = decoderOfType(ctx.append(field.Name()), field.Type())
+		}
+		encoder := fieldEncoders[fieldCacheKey]
+		if encoder == nil {
+			encoder = encoderOfType(ctx.append(field.Name()), field.Type())
+		}
+		binding := &Binding{
+			Field:     field,
+			FromNames: fieldNames,
+			ToNames:   fieldNames,
+			Decoder:   decoder,
+			Encoder:   encoder,
+		}
+		binding.levels = []int{i}
+		bindings = append(bindings, binding)
+	}
+	return createStructDescriptor(ctx, typ, bindings, embeddedBindings)
+}
+func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor {
+	structDescriptor := &StructDescriptor{
+		Type:   typ,
+		Fields: bindings,
+	}
+	for _, extension := range extensions {
+		extension.UpdateStructDescriptor(structDescriptor)
+	}
+	ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
+	ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
+	for _, extension := range ctx.extraExtensions {
+		extension.UpdateStructDescriptor(structDescriptor)
+	}
+	processTags(structDescriptor, ctx.frozenConfig)
+	// merge normal & embedded bindings & sort with original order
+	allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))
+	sort.Sort(allBindings)
+	structDescriptor.Fields = allBindings
+	return structDescriptor
+}
+
+type sortableBindings []*Binding
+
+func (bindings sortableBindings) Len() int {
+	return len(bindings)
+}
+
+func (bindings sortableBindings) Less(i, j int) bool {
+	left := bindings[i].levels
+	right := bindings[j].levels
+	k := 0
+	for {
+		if left[k] < right[k] {
+			return true
+		} else if left[k] > right[k] {
+			return false
+		}
+		k++
+	}
+}
+
+func (bindings sortableBindings) Swap(i, j int) {
+	bindings[i], bindings[j] = bindings[j], bindings[i]
+}
+
+func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) {
+	for _, binding := range structDescriptor.Fields {
+		shouldOmitEmpty := false
+		tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",")
+		for _, tagPart := range tagParts[1:] {
+			if tagPart == "omitempty" {
+				shouldOmitEmpty = true
+			} else if tagPart == "string" {
+				if binding.Field.Type().Kind() == reflect.String {
+					binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg}
+					binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg}
+				} else {
+					binding.Decoder = &stringModeNumberDecoder{binding.Decoder}
+					binding.Encoder = &stringModeNumberEncoder{binding.Encoder}
+				}
+			}
+		}
+		binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder}
+		binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty}
+	}
+}
+
+func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string {
+	// ignore?
+	if wholeTag == "-" {
+		return []string{}
+	}
+	// rename?
+	var fieldNames []string
+	if tagProvidedFieldName == "" {
+		fieldNames = []string{originalFieldName}
+	} else {
+		fieldNames = []string{tagProvidedFieldName}
+	}
+	// private?
+	isNotExported := unicode.IsLower(rune(originalFieldName[0]))
+	if isNotExported {
+		fieldNames = []string{}
+	}
+	return fieldNames
+}
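
The Extension SPI above is the global hook for customizing how struct fields are bound; embedding DummyExtension means only the methods of interest need to be overridden. A hedged sketch of an extension that lower-cases every field name via UpdateStructDescriptor (the extension type, its name and the Device struct are illustrative only):

    package main

    import (
    	"fmt"
    	"strings"

    	jsoniter "github.com/json-iterator/go"
    )

    // lowerCaseExtension is a hypothetical extension; embedding DummyExtension
    // supplies no-op implementations for the Extension methods not overridden.
    type lowerCaseExtension struct {
    	jsoniter.DummyExtension
    }

    func (e *lowerCaseExtension) UpdateStructDescriptor(sd *jsoniter.StructDescriptor) {
    	for _, binding := range sd.Fields {
    		lower := strings.ToLower(binding.Field.Name())
    		binding.FromNames = []string{lower} // names accepted when decoding
    		binding.ToNames = []string{lower}   // name emitted when encoding
    	}
    }

    type Device struct {
    	SerialNumber string
    	AdminState   string
    }

    func main() {
    	jsoniter.RegisterExtension(&lowerCaseExtension{})
    	out, _ := jsoniter.Marshal(Device{SerialNumber: "ABC123", AdminState: "ENABLED"})
    	fmt.Println(string(out)) // {"serialnumber":"ABC123","adminstate":"ENABLED"}
    }
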
diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go
new file mode 100644
index 0000000..98d45c1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_number.go
@@ -0,0 +1,112 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"strconv"
+	"unsafe"
+)
+
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+func CastJsonNumber(val interface{}) (string, bool) {
+	switch typedVal := val.(type) {
+	case json.Number:
+		return string(typedVal), true
+	case Number:
+		return string(typedVal), true
+	}
+	return "", false
+}
+
+var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem()
+var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem()
+
+func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ.AssignableTo(jsonNumberType) {
+		return &jsonNumberCodec{}
+	}
+	if typ.AssignableTo(jsoniterNumberType) {
+		return &jsoniterNumberCodec{}
+	}
+	return nil
+}
+
+func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ.AssignableTo(jsonNumberType) {
+		return &jsonNumberCodec{}
+	}
+	if typ.AssignableTo(jsoniterNumberType) {
+		return &jsoniterNumberCodec{}
+	}
+	return nil
+}
+
+type jsonNumberCodec struct {
+}
+
+func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	switch iter.WhatIsNext() {
+	case StringValue:
+		*((*json.Number)(ptr)) = json.Number(iter.ReadString())
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		*((*json.Number)(ptr)) = ""
+	default:
+		*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
+	}
+}
+
+func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	number := *((*json.Number)(ptr))
+	if len(number) == 0 {
+		stream.writeByte('0')
+	} else {
+		stream.WriteRaw(string(number))
+	}
+}
+
+func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*json.Number)(ptr))) == 0
+}
+
+type jsoniterNumberCodec struct {
+}
+
+func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	switch iter.WhatIsNext() {
+	case StringValue:
+		*((*Number)(ptr)) = Number(iter.ReadString())
+	case NilValue:
+		iter.skipFourBytes('n', 'u', 'l', 'l')
+		*((*Number)(ptr)) = ""
+	default:
+		*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
+	}
+}
+
+func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	number := *((*Number)(ptr))
+	if len(number) == 0 {
+		stream.writeByte('0')
+	} else {
+		stream.WriteRaw(string(number))
+	}
+}
+
+func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*Number)(ptr))) == 0
+}
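+
+// Illustrative sketch, not part of upstream json-iterator: it only shows how
+// the Number helpers defined in this file behave together.
+func exampleNumberUsage() (float64, error) {
+	// CastJsonNumber normalizes both encoding/json and jsoniter number
+	// wrappers to their literal text.
+	text, _ := CastJsonNumber(json.Number("3.14")) // "3.14", true
+	n := Number(text)
+	if _, err := n.Int64(); err != nil {
+		// "3.14" is not an integer literal, so Int64 reports an error.
+	}
+	return n.Float64() // 3.14, nil
+}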
diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
new file mode 100644
index 0000000..f261993
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go
@@ -0,0 +1,60 @@
+package jsoniter
+
+import (
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"unsafe"
+)
+
+var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()
+var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()
+
+func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == jsonRawMessageType {
+		return &jsonRawMessageCodec{}
+	}
+	if typ == jsoniterRawMessageType {
+		return &jsoniterRawMessageCodec{}
+	}
+	return nil
+}
+
+func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ == jsonRawMessageType {
+		return &jsonRawMessageCodec{}
+	}
+	if typ == jsoniterRawMessageType {
+		return &jsoniterRawMessageCodec{}
+	}
+	return nil
+}
+
+type jsonRawMessageCodec struct {
+}
+
+func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteRaw(string(*((*json.RawMessage)(ptr))))
+}
+
+func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*json.RawMessage)(ptr))) == 0
+}
+
+type jsoniterRawMessageCodec struct {
+}
+
+func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes())
+}
+
+func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteRaw(string(*((*RawMessage)(ptr))))
+}
+
+func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*RawMessage)(ptr))) == 0
+}
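+
+// Illustrative sketch, not part of upstream json-iterator: it shows the effect
+// of the raw-message codecs above, which copy the stored bytes verbatim instead
+// of re-encoding them as a quoted string. Marshal is the package-level helper
+// defined elsewhere in this package.
+func exampleRawMessagePassThrough() ([]byte, error) {
+	payload := struct {
+		Inner json.RawMessage `json:"inner"`
+	}{Inner: json.RawMessage(`{"a":1}`)}
+	// The codec writes the raw bytes, producing {"inner":{"a":1}}.
+	return Marshal(payload)
+}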
diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go
new file mode 100644
index 0000000..547b442
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_map.go
@@ -0,0 +1,338 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"sort"
+	"unsafe"
+)
+
+func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder {
+	mapType := typ.(*reflect2.UnsafeMapType)
+	keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key())
+	elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem())
+	return &mapDecoder{
+		mapType:     mapType,
+		keyType:     mapType.Key(),
+		elemType:    mapType.Elem(),
+		keyDecoder:  keyDecoder,
+		elemDecoder: elemDecoder,
+	}
+}
+
+func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
+	mapType := typ.(*reflect2.UnsafeMapType)
+	if ctx.sortMapKeys {
+		return &sortKeysMapEncoder{
+			mapType:     mapType,
+			keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+			elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+		}
+	}
+	return &mapEncoder{
+		mapType:     mapType,
+		keyEncoder:  encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()),
+		elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()),
+	}
+}
+
+func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
+	decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
+	if decoder != nil {
+		return decoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		decoder := extension.CreateMapKeyDecoder(typ)
+		if decoder != nil {
+			return decoder
+		}
+	}
+	switch typ.Kind() {
+	case reflect.String:
+		return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+	case reflect.Bool,
+		reflect.Uint8, reflect.Int8,
+		reflect.Uint16, reflect.Int16,
+		reflect.Uint32, reflect.Int32,
+		reflect.Uint64, reflect.Int64,
+		reflect.Uint, reflect.Int,
+		reflect.Float32, reflect.Float64,
+		reflect.Uintptr:
+		typ = reflect2.DefaultTypeOfKind(typ.Kind())
+		return &numericMapKeyDecoder{decoderOfType(ctx, typ)}
+	default:
+		ptrType := reflect2.PtrTo(typ)
+		if ptrType.Implements(unmarshalerType) {
+			return &referenceDecoder{
+				&unmarshalerDecoder{
+					valType: ptrType,
+				},
+			}
+		}
+		if typ.Implements(unmarshalerType) {
+			return &unmarshalerDecoder{
+				valType: typ,
+			}
+		}
+		if ptrType.Implements(textUnmarshalerType) {
+			return &referenceDecoder{
+				&textUnmarshalerDecoder{
+					valType: ptrType,
+				},
+			}
+		}
+		if typ.Implements(textUnmarshalerType) {
+			return &textUnmarshalerDecoder{
+				valType: typ,
+			}
+		}
+		return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+	}
+}
+
+func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
+	encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
+	if encoder != nil {
+		return encoder
+	}
+	for _, extension := range ctx.extraExtensions {
+		encoder := extension.CreateMapKeyEncoder(typ)
+		if encoder != nil {
+			return encoder
+		}
+	}
+	switch typ.Kind() {
+	case reflect.String:
+		return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String))
+	case reflect.Bool,
+		reflect.Uint8, reflect.Int8,
+		reflect.Uint16, reflect.Int16,
+		reflect.Uint32, reflect.Int32,
+		reflect.Uint64, reflect.Int64,
+		reflect.Uint, reflect.Int,
+		reflect.Float32, reflect.Float64,
+		reflect.Uintptr:
+		typ = reflect2.DefaultTypeOfKind(typ.Kind())
+		return &numericMapKeyEncoder{encoderOfType(ctx, typ)}
+	default:
+		if typ == textMarshalerType {
+			return &directTextMarshalerEncoder{
+				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			}
+		}
+		if typ.Implements(textMarshalerType) {
+			return &textMarshalerEncoder{
+				valType:       typ,
+				stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			}
+		}
+		if typ.Kind() == reflect.Interface {
+			return &dynamicMapKeyEncoder{ctx, typ}
+		}
+		return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)}
+	}
+}
+
+type mapDecoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyType     reflect2.Type
+	elemType    reflect2.Type
+	keyDecoder  ValDecoder
+	elemDecoder ValDecoder
+}
+
+func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	mapType := decoder.mapType
+	c := iter.nextToken()
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		*(*unsafe.Pointer)(ptr) = nil
+		mapType.UnsafeSet(ptr, mapType.UnsafeNew())
+		return
+	}
+	if mapType.UnsafeIsNil(ptr) {
+		mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0))
+	}
+	if c != '{' {
+		iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == '}' {
+		return
+	}
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
+		return
+	}
+	iter.unreadByte()
+	key := decoder.keyType.UnsafeNew()
+	decoder.keyDecoder.Decode(key, iter)
+	c = iter.nextToken()
+	if c != ':' {
+		iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+		return
+	}
+	elem := decoder.elemType.UnsafeNew()
+	decoder.elemDecoder.Decode(elem, iter)
+	decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		key := decoder.keyType.UnsafeNew()
+		decoder.keyDecoder.Decode(key, iter)
+		c = iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
+			return
+		}
+		elem := decoder.elemType.UnsafeNew()
+		decoder.elemDecoder.Decode(elem, iter)
+		decoder.mapType.UnsafeSetIndex(ptr, key, elem)
+	}
+	if c != '}' {
+		iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c}))
+	}
+}
+
+type numericMapKeyDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+		return
+	}
+	decoder.decoder.Decode(ptr, iter)
+	c = iter.nextToken()
+	if c != '"' {
+		iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c}))
+		return
+	}
+}
+
+type numericMapKeyEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.writeByte('"')
+	encoder.encoder.Encode(ptr, stream)
+	stream.writeByte('"')
+}
+
+func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type dynamicMapKeyEncoder struct {
+	ctx     *ctx
+	valType reflect2.Type
+}
+
+func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream)
+}
+
+func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj))
+}
+
+type mapEncoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyEncoder  ValEncoder
+	elemEncoder ValEncoder
+}
+
+func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteObjectStart()
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	for i := 0; iter.HasNext(); i++ {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		key, elem := iter.UnsafeNext()
+		encoder.keyEncoder.Encode(key, stream)
+		if stream.indention > 0 {
+			stream.writeTwoBytes(byte(':'), byte(' '))
+		} else {
+			stream.writeByte(':')
+		}
+		encoder.elemEncoder.Encode(elem, stream)
+	}
+	stream.WriteObjectEnd()
+}
+
+func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	return !iter.HasNext()
+}
+
+type sortKeysMapEncoder struct {
+	mapType     *reflect2.UnsafeMapType
+	keyEncoder  ValEncoder
+	elemEncoder ValEncoder
+}
+
+func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *(*unsafe.Pointer)(ptr) == nil {
+		stream.WriteNil()
+		return
+	}
+	stream.WriteObjectStart()
+	mapIter := encoder.mapType.UnsafeIterate(ptr)
+	subStream := stream.cfg.BorrowStream(nil)
+	subIter := stream.cfg.BorrowIterator(nil)
+	keyValues := encodedKeyValues{}
+	for mapIter.HasNext() {
+		subStream.buf = make([]byte, 0, 64)
+		key, elem := mapIter.UnsafeNext()
+		encoder.keyEncoder.Encode(key, subStream)
+		if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil {
+			stream.Error = subStream.Error
+		}
+		encodedKey := subStream.Buffer()
+		subIter.ResetBytes(encodedKey)
+		decodedKey := subIter.ReadString()
+		if stream.indention > 0 {
+			subStream.writeTwoBytes(byte(':'), byte(' '))
+		} else {
+			subStream.writeByte(':')
+		}
+		encoder.elemEncoder.Encode(elem, subStream)
+		keyValues = append(keyValues, encodedKV{
+			key:      decodedKey,
+			keyValue: subStream.Buffer(),
+		})
+	}
+	sort.Sort(keyValues)
+	for i, keyValue := range keyValues {
+		if i != 0 {
+			stream.WriteMore()
+		}
+		stream.Write(keyValue.keyValue)
+	}
+	stream.WriteObjectEnd()
+	stream.cfg.ReturnStream(subStream)
+	stream.cfg.ReturnIterator(subIter)
+}
+
+func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	iter := encoder.mapType.UnsafeIterate(ptr)
+	return !iter.HasNext()
+}
+
+type encodedKeyValues []encodedKV
+
+type encodedKV struct {
+	key      string
+	keyValue []byte
+}
+
+func (sv encodedKeyValues) Len() int           { return len(sv) }
+func (sv encodedKeyValues) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key }
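+
+// Illustrative sketch, not part of upstream json-iterator: with SortMapKeys
+// enabled, encoderOfMap above returns sortKeysMapEncoder and map output becomes
+// deterministic; the default mapEncoder writes keys in Go's map iteration order.
+func exampleSortedMapEncoding() (string, error) {
+	api := Config{SortMapKeys: true}.Froze()
+	// Each key is encoded into a side buffer, the pairs are sorted by decoded
+	// key, then written, so the result is {"a":1,"b":2} every time.
+	return api.MarshalToString(map[string]int{"b": 2, "a": 1})
+}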
diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go
new file mode 100644
index 0000000..fea5071
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go
@@ -0,0 +1,217 @@
+package jsoniter
+
+import (
+	"encoding"
+	"encoding/json"
+	"github.com/modern-go/reflect2"
+	"unsafe"
+)
+
+var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem()
+var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem()
+var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem()
+var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem()
+
+func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder {
+	ptrType := reflect2.PtrTo(typ)
+	if ptrType.Implements(unmarshalerType) {
+		return &referenceDecoder{
+			&unmarshalerDecoder{ptrType},
+		}
+	}
+	if ptrType.Implements(textUnmarshalerType) {
+		return &referenceDecoder{
+			&textUnmarshalerDecoder{ptrType},
+		}
+	}
+	return nil
+}
+
+func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ == marshalerType {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &directMarshalerEncoder{
+			checkIsEmpty: checkIsEmpty,
+		}
+		return encoder
+	}
+	if typ.Implements(marshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &marshalerEncoder{
+			valType:      typ,
+			checkIsEmpty: checkIsEmpty,
+		}
+		return encoder
+	}
+	ptrType := reflect2.PtrTo(typ)
+	if ctx.prefix != "" && ptrType.Implements(marshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+		var encoder ValEncoder = &marshalerEncoder{
+			valType:      ptrType,
+			checkIsEmpty: checkIsEmpty,
+		}
+		return &referenceEncoder{encoder}
+	}
+	if typ == textMarshalerType {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &directTextMarshalerEncoder{
+			checkIsEmpty:  checkIsEmpty,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+		}
+		return encoder
+	}
+	if typ.Implements(textMarshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, typ)
+		var encoder ValEncoder = &textMarshalerEncoder{
+			valType:       typ,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			checkIsEmpty:  checkIsEmpty,
+		}
+		return encoder
+	}
+	// if prefix is empty, the type is the root type, so skip the pointer-receiver fallback below
+	if ctx.prefix != "" && ptrType.Implements(textMarshalerType) {
+		checkIsEmpty := createCheckIsEmpty(ctx, ptrType)
+		var encoder ValEncoder = &textMarshalerEncoder{
+			valType:       ptrType,
+			stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")),
+			checkIsEmpty:  checkIsEmpty,
+		}
+		return &referenceEncoder{encoder}
+	}
+	return nil
+}
+
+type marshalerEncoder struct {
+	checkIsEmpty checkIsEmpty
+	valType      reflect2.Type
+}
+
+func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+		stream.WriteNil()
+		return
+	}
+	bytes, err := json.Marshal(obj)
+	if err != nil {
+		stream.Error = err
+	} else {
+		stream.Write(bytes)
+	}
+}
+
+func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directMarshalerEncoder struct {
+	checkIsEmpty checkIsEmpty
+}
+
+func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	marshaler := *(*json.Marshaler)(ptr)
+	if marshaler == nil {
+		stream.WriteNil()
+		return
+	}
+	bytes, err := marshaler.MarshalJSON()
+	if err != nil {
+		stream.Error = err
+	} else {
+		stream.Write(bytes)
+	}
+}
+
+func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type textMarshalerEncoder struct {
+	valType       reflect2.Type
+	stringEncoder ValEncoder
+	checkIsEmpty  checkIsEmpty
+}
+
+func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	obj := encoder.valType.UnsafeIndirect(ptr)
+	if encoder.valType.IsNullable() && reflect2.IsNil(obj) {
+		stream.WriteNil()
+		return
+	}
+	marshaler := (obj).(encoding.TextMarshaler)
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		stream.Error = err
+	} else {
+		str := string(bytes)
+		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+	}
+}
+
+func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type directTextMarshalerEncoder struct {
+	stringEncoder ValEncoder
+	checkIsEmpty  checkIsEmpty
+}
+
+func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	marshaler := *(*encoding.TextMarshaler)(ptr)
+	if marshaler == nil {
+		stream.WriteNil()
+		return
+	}
+	bytes, err := marshaler.MarshalText()
+	if err != nil {
+		stream.Error = err
+	} else {
+		str := string(bytes)
+		encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream)
+	}
+}
+
+func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.checkIsEmpty.IsEmpty(ptr)
+}
+
+type unmarshalerDecoder struct {
+	valType reflect2.Type
+}
+
+func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valType := decoder.valType
+	obj := valType.UnsafeIndirect(ptr)
+	unmarshaler := obj.(json.Unmarshaler)
+	iter.nextToken()
+	iter.unreadByte() // nextToken skipped leading whitespace; step back to the value start
+	bytes := iter.SkipAndReturnBytes()
+	err := unmarshaler.UnmarshalJSON(bytes)
+	if err != nil {
+		iter.ReportError("unmarshalerDecoder", err.Error())
+	}
+}
+
+type textUnmarshalerDecoder struct {
+	valType reflect2.Type
+}
+
+func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valType := decoder.valType
+	obj := valType.UnsafeIndirect(ptr)
+	if reflect2.IsNil(obj) {
+		ptrType := valType.(*reflect2.UnsafePtrType)
+		elemType := ptrType.Elem()
+		elem := elemType.UnsafeNew()
+		ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem))
+		obj = valType.UnsafeIndirect(ptr)
+	}
+	unmarshaler := (obj).(encoding.TextUnmarshaler)
+	str := iter.ReadString()
+	err := unmarshaler.UnmarshalText([]byte(str))
+	if err != nil {
+		iter.ReportError("textUnmarshalerDecoder", err.Error())
+	}
+}
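+
+// Illustrative sketch, not part of upstream json-iterator: a type implementing
+// encoding.TextMarshaler is routed to textMarshalerEncoder above and therefore
+// emitted as a JSON string.
+type exampleTextID string
+
+func (id exampleTextID) MarshalText() ([]byte, error) {
+	return []byte("id-" + string(id)), nil
+}
+
+func exampleTextMarshalerEncoding() ([]byte, error) {
+	// The package-level Marshal helper produces the JSON string "id-42".
+	return Marshal(exampleTextID("42"))
+}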
diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go
new file mode 100644
index 0000000..9042eb0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_native.go
@@ -0,0 +1,451 @@
+package jsoniter
+
+import (
+	"encoding/base64"
+	"reflect"
+	"strconv"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
+const ptrSize = 32 << uintptr(^uintptr(0)>>63)
+
+func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder {
+	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+		sliceDecoder := decoderOfSlice(ctx, typ)
+		return &base64Codec{sliceDecoder: sliceDecoder}
+	}
+	typeName := typ.String()
+	kind := typ.Kind()
+	switch kind {
+	case reflect.String:
+		if typeName != "string" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+		}
+		return &stringCodec{}
+	case reflect.Int:
+		if typeName != "int" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &int32Codec{}
+		}
+		return &int64Codec{}
+	case reflect.Int8:
+		if typeName != "int8" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+		}
+		return &int8Codec{}
+	case reflect.Int16:
+		if typeName != "int16" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+		}
+		return &int16Codec{}
+	case reflect.Int32:
+		if typeName != "int32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+		}
+		return &int32Codec{}
+	case reflect.Int64:
+		if typeName != "int64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+		}
+		return &int64Codec{}
+	case reflect.Uint:
+		if typeName != "uint" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint8:
+		if typeName != "uint8" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+		}
+		return &uint8Codec{}
+	case reflect.Uint16:
+		if typeName != "uint16" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+		}
+		return &uint16Codec{}
+	case reflect.Uint32:
+		if typeName != "uint32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+		}
+		return &uint32Codec{}
+	case reflect.Uintptr:
+		if typeName != "uintptr" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+		}
+		if ptrSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint64:
+		if typeName != "uint64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+		}
+		return &uint64Codec{}
+	case reflect.Float32:
+		if typeName != "float32" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+		}
+		return &float32Codec{}
+	case reflect.Float64:
+		if typeName != "float64" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+		}
+		return &float64Codec{}
+	case reflect.Bool:
+		if typeName != "bool" {
+			return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+		}
+		return &boolCodec{}
+	}
+	return nil
+}
+
+func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder {
+	if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 {
+		sliceDecoder := decoderOfSlice(ctx, typ)
+		return &base64Codec{sliceDecoder: sliceDecoder}
+	}
+	typeName := typ.String()
+	switch typ.Kind() {
+	case reflect.String:
+		if typeName != "string" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem())
+		}
+		return &stringCodec{}
+	case reflect.Int:
+		if typeName != "int" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &int32Codec{}
+		}
+		return &int64Codec{}
+	case reflect.Int8:
+		if typeName != "int8" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem())
+		}
+		return &int8Codec{}
+	case reflect.Int16:
+		if typeName != "int16" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem())
+		}
+		return &int16Codec{}
+	case reflect.Int32:
+		if typeName != "int32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem())
+		}
+		return &int32Codec{}
+	case reflect.Int64:
+		if typeName != "int64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem())
+		}
+		return &int64Codec{}
+	case reflect.Uint:
+		if typeName != "uint" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem())
+		}
+		if strconv.IntSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint8:
+		if typeName != "uint8" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem())
+		}
+		return &uint8Codec{}
+	case reflect.Uint16:
+		if typeName != "uint16" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem())
+		}
+		return &uint16Codec{}
+	case reflect.Uint32:
+		if typeName != "uint32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem())
+		}
+		return &uint32Codec{}
+	case reflect.Uintptr:
+		if typeName != "uintptr" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem())
+		}
+		if ptrSize == 32 {
+			return &uint32Codec{}
+		}
+		return &uint64Codec{}
+	case reflect.Uint64:
+		if typeName != "uint64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem())
+		}
+		return &uint64Codec{}
+	case reflect.Float32:
+		if typeName != "float32" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem())
+		}
+		return &float32Codec{}
+	case reflect.Float64:
+		if typeName != "float64" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem())
+		}
+		return &float64Codec{}
+	case reflect.Bool:
+		if typeName != "bool" {
+			return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem())
+		}
+		return &boolCodec{}
+	}
+	return nil
+}
+
+type stringCodec struct {
+}
+
+func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	*((*string)(ptr)) = iter.ReadString()
+}
+
+func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	str := *((*string)(ptr))
+	stream.WriteString(str)
+}
+
+func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*string)(ptr)) == ""
+}
+
+type int8Codec struct {
+}
+
+func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int8)(ptr)) = iter.ReadInt8()
+	}
+}
+
+func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt8(*((*int8)(ptr)))
+}
+
+func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int8)(ptr)) == 0
+}
+
+type int16Codec struct {
+}
+
+func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int16)(ptr)) = iter.ReadInt16()
+	}
+}
+
+func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt16(*((*int16)(ptr)))
+}
+
+func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int16)(ptr)) == 0
+}
+
+type int32Codec struct {
+}
+
+func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int32)(ptr)) = iter.ReadInt32()
+	}
+}
+
+func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt32(*((*int32)(ptr)))
+}
+
+func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int32)(ptr)) == 0
+}
+
+type int64Codec struct {
+}
+
+func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*int64)(ptr)) = iter.ReadInt64()
+	}
+}
+
+func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteInt64(*((*int64)(ptr)))
+}
+
+func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*int64)(ptr)) == 0
+}
+
+type uint8Codec struct {
+}
+
+func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint8)(ptr)) = iter.ReadUint8()
+	}
+}
+
+func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint8(*((*uint8)(ptr)))
+}
+
+func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint8)(ptr)) == 0
+}
+
+type uint16Codec struct {
+}
+
+func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint16)(ptr)) = iter.ReadUint16()
+	}
+}
+
+func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint16(*((*uint16)(ptr)))
+}
+
+func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint16)(ptr)) == 0
+}
+
+type uint32Codec struct {
+}
+
+func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint32)(ptr)) = iter.ReadUint32()
+	}
+}
+
+func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint32(*((*uint32)(ptr)))
+}
+
+func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint32)(ptr)) == 0
+}
+
+type uint64Codec struct {
+}
+
+func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*uint64)(ptr)) = iter.ReadUint64()
+	}
+}
+
+func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteUint64(*((*uint64)(ptr)))
+}
+
+func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*uint64)(ptr)) == 0
+}
+
+type float32Codec struct {
+}
+
+func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*float32)(ptr)) = iter.ReadFloat32()
+	}
+}
+
+func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat32(*((*float32)(ptr)))
+}
+
+func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float32)(ptr)) == 0
+}
+
+type float64Codec struct {
+}
+
+func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*float64)(ptr)) = iter.ReadFloat64()
+	}
+}
+
+func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteFloat64(*((*float64)(ptr)))
+}
+
+func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*float64)(ptr)) == 0
+}
+
+type boolCodec struct {
+}
+
+func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.ReadNil() {
+		*((*bool)(ptr)) = iter.ReadBool()
+	}
+}
+
+func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteBool(*((*bool)(ptr)))
+}
+
+func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool {
+	return !(*((*bool)(ptr)))
+}
+
+type base64Codec struct {
+	sliceType    *reflect2.UnsafeSliceType
+	sliceDecoder ValDecoder
+}
+
+func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		codec.sliceType.UnsafeSetNil(ptr)
+		return
+	}
+	switch iter.WhatIsNext() {
+	case StringValue:
+		src := iter.ReadString()
+		dst, err := base64.StdEncoding.DecodeString(src)
+		if err != nil {
+			iter.ReportError("decode base64", err.Error())
+		} else {
+			codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst))
+		}
+	case ArrayValue:
+		codec.sliceDecoder.Decode(ptr, iter)
+	default:
+		iter.ReportError("base64Codec", "invalid input")
+	}
+}
+
+func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
+	src := *((*[]byte)(ptr))
+	if len(src) == 0 {
+		stream.WriteNil()
+		return
+	}
+	encoding := base64.StdEncoding
+	stream.writeByte('"')
+	size := encoding.EncodedLen(len(src))
+	buf := make([]byte, size)
+	encoding.Encode(buf, src)
+	stream.buf = append(stream.buf, buf...)
+	stream.writeByte('"')
+}
+
+func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool {
+	return len(*((*[]byte)(ptr))) == 0
+}
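+
+// Illustrative sketch, not part of upstream json-iterator: the base64Codec
+// above encodes a []byte as a base64 JSON string and, on decode, accepts either
+// a base64 string or a plain JSON array of byte values.
+func exampleBase64Codec() ([]byte, error) {
+	encoded, err := Marshal([]byte("hi")) // the JSON string "aGk="
+	if err != nil {
+		return nil, err
+	}
+	var decoded []byte
+	// Decoding "aGk=" (or, equivalently, [104,105]) restores the original bytes.
+	err = Unmarshal(encoded, &decoded)
+	return decoded, err
+}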
diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go
new file mode 100644
index 0000000..43ec71d
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_optional.go
@@ -0,0 +1,133 @@
+package jsoniter
+
+import (
+	"github.com/modern-go/reflect2"
+	"reflect"
+	"unsafe"
+)
+
+func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder {
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	elemType := ptrType.Elem()
+	decoder := decoderOfType(ctx, elemType)
+	if ctx.prefix == "" && elemType.Kind() == reflect.Ptr {
+		return &dereferenceDecoder{elemType, decoder}
+	}
+	return &OptionalDecoder{elemType, decoder}
+}
+
+func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder {
+	ptrType := typ.(*reflect2.UnsafePtrType)
+	elemType := ptrType.Elem()
+	elemEncoder := encoderOfType(ctx, elemType)
+	encoder := &OptionalEncoder{elemEncoder}
+	return encoder
+}
+
+// OptionalDecoder decodes a JSON value through a pointer: null sets the
+// pointer to nil, and a non-null value is decoded into a freshly allocated
+// (or reused) element.
+type OptionalDecoder struct {
+	ValueType    reflect2.Type
+	ValueDecoder ValDecoder
+}
+
+func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if iter.ReadNil() {
+		*((*unsafe.Pointer)(ptr)) = nil
+	} else {
+		if *((*unsafe.Pointer)(ptr)) == nil {
+			// the pointer is nil, so allocate memory to hold the decoded value
+			newPtr := decoder.ValueType.UnsafeNew()
+			decoder.ValueDecoder.Decode(newPtr, iter)
+			*((*unsafe.Pointer)(ptr)) = newPtr
+		} else {
+			// reuse the existing instance
+			decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+		}
+	}
+}
+
+type dereferenceDecoder struct {
+	// only to dereference a pointer
+	valueType    reflect2.Type
+	valueDecoder ValDecoder
+}
+
+func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		// the pointer is nil, so allocate memory to hold the decoded value
+		newPtr := decoder.valueType.UnsafeNew()
+		decoder.valueDecoder.Decode(newPtr, iter)
+		*((*unsafe.Pointer)(ptr)) = newPtr
+	} else {
+		// reuse the existing instance
+		decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
+	}
+}
+
+// OptionalEncoder encodes through a pointer: a nil pointer is written as null,
+// otherwise the pointed-to value is encoded with ValueEncoder.
+type OptionalEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return *((*unsafe.Pointer)(ptr)) == nil
+}
+
+type dereferenceEncoder struct {
+	ValueEncoder ValEncoder
+}
+
+func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if *((*unsafe.Pointer)(ptr)) == nil {
+		stream.WriteNil()
+	} else {
+		encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
+	}
+}
+
+func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	dePtr := *((*unsafe.Pointer)(ptr))
+	if dePtr == nil {
+		return true
+	}
+	return encoder.ValueEncoder.IsEmpty(dePtr)
+}
+
+func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+	deReferenced := *((*unsafe.Pointer)(ptr))
+	if deReferenced == nil {
+		return true
+	}
+	isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil)
+	if !converted {
+		return false
+	}
+	fieldPtr := unsafe.Pointer(deReferenced)
+	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type referenceEncoder struct {
+	encoder ValEncoder
+}
+
+func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	encoder.encoder.Encode(unsafe.Pointer(&ptr), stream)
+}
+
+func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr))
+}
+
+type referenceDecoder struct {
+	decoder ValDecoder
+}
+
+func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.decoder.Decode(unsafe.Pointer(&ptr), iter)
+}
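+
+// Illustrative sketch, not part of upstream json-iterator: the OptionalDecoder
+// and OptionalEncoder above give pointers their usual JSON semantics. A nil
+// pointer encodes as null, and decoding a non-null value into a nil pointer
+// allocates a fresh element to hold it.
+func exampleOptionalPointer() (*int, error) {
+	var target struct {
+		N *int `json:"n"`
+	}
+	// The decoder sees a non-null value and allocates an int for target.N.
+	err := Unmarshal([]byte(`{"n":7}`), &target)
+	return target.N, err
+}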
diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go
new file mode 100644
index 0000000..9441d79
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_slice.go
@@ -0,0 +1,99 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"unsafe"
+)
+
+func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder {
+	sliceType := typ.(*reflect2.UnsafeSliceType)
+	decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+	return &sliceDecoder{sliceType, decoder}
+}
+
+func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder {
+	sliceType := typ.(*reflect2.UnsafeSliceType)
+	encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem())
+	return &sliceEncoder{sliceType, encoder}
+}
+
+type sliceEncoder struct {
+	sliceType   *reflect2.UnsafeSliceType
+	elemEncoder ValEncoder
+}
+
+func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	if encoder.sliceType.UnsafeIsNil(ptr) {
+		stream.WriteNil()
+		return
+	}
+	length := encoder.sliceType.UnsafeLengthOf(ptr)
+	if length == 0 {
+		stream.WriteEmptyArray()
+		return
+	}
+	stream.WriteArrayStart()
+	encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream)
+	for i := 1; i < length; i++ {
+		stream.WriteMore()
+		elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i)
+		encoder.elemEncoder.Encode(elemPtr, stream)
+	}
+	stream.WriteArrayEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error())
+	}
+}
+
+func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.sliceType.UnsafeLengthOf(ptr) == 0
+}
+
+type sliceDecoder struct {
+	sliceType   *reflect2.UnsafeSliceType
+	elemDecoder ValDecoder
+}
+
+func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.doDecode(ptr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error())
+	}
+}
+
+func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	sliceType := decoder.sliceType
+	if c == 'n' {
+		iter.skipThreeBytes('u', 'l', 'l')
+		sliceType.UnsafeSetNil(ptr)
+		return
+	}
+	if c != '[' {
+		iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c}))
+		return
+	}
+	c = iter.nextToken()
+	if c == ']' {
+		sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0))
+		return
+	}
+	iter.unreadByte()
+	sliceType.UnsafeGrow(ptr, 1)
+	elemPtr := sliceType.UnsafeGetIndex(ptr, 0)
+	decoder.elemDecoder.Decode(elemPtr, iter)
+	length := 1
+	for c = iter.nextToken(); c == ','; c = iter.nextToken() {
+		idx := length
+		length += 1
+		sliceType.UnsafeGrow(ptr, length)
+		elemPtr = sliceType.UnsafeGetIndex(ptr, idx)
+		decoder.elemDecoder.Decode(elemPtr, iter)
+	}
+	if c != ']' {
+		iter.ReportError("decode slice", "expect ], but found "+string([]byte{c}))
+		return
+	}
+}
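+
+// Illustrative sketch, not part of upstream json-iterator: it shows the three
+// inputs sliceDecoder.doDecode distinguishes, namely null, an empty array, and
+// a non-empty array grown one element at a time.
+func exampleSliceDecoding() ([]int, []int, []int, error) {
+	var fromNull, fromEmpty, fromValues []int
+	if err := Unmarshal([]byte(`null`), &fromNull); err != nil { // stays nil
+		return nil, nil, nil, err
+	}
+	if err := Unmarshal([]byte(`[]`), &fromEmpty); err != nil { // non-nil, length 0
+		return nil, nil, nil, err
+	}
+	err := Unmarshal([]byte(`[1,2,3]`), &fromValues) // []int{1, 2, 3}
+	return fromNull, fromEmpty, fromValues, err
+}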
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
new file mode 100644
index 0000000..355d2d1
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go
@@ -0,0 +1,1048 @@
+package jsoniter
+
+import (
+	"fmt"
+	"io"
+	"strings"
+	"unsafe"
+
+	"github.com/modern-go/reflect2"
+)
+
+func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder {
+	bindings := map[string]*Binding{}
+	structDescriptor := describeStruct(ctx, typ)
+	for _, binding := range structDescriptor.Fields {
+		for _, fromName := range binding.FromNames {
+			old := bindings[fromName]
+			if old == nil {
+				bindings[fromName] = binding
+				continue
+			}
+			ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding)
+			if ignoreOld {
+				delete(bindings, fromName)
+			}
+			if !ignoreNew {
+				bindings[fromName] = binding
+			}
+		}
+	}
+	fields := map[string]*structFieldDecoder{}
+	for k, binding := range bindings {
+		fields[k] = binding.Decoder.(*structFieldDecoder)
+	}
+
+	if !ctx.caseSensitive() {
+		for k, binding := range bindings {
+			if _, found := fields[strings.ToLower(k)]; !found {
+				fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder)
+			}
+		}
+	}
+
+	return createStructDecoder(ctx, typ, fields)
+}
+
+func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder {
+	if ctx.disallowUnknownFields {
+		return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true}
+	}
+	knownHash := map[int64]struct{}{
+		0: {},
+	}
+
+	switch len(fields) {
+	case 0:
+		return &skipObjectDecoder{typ}
+	case 1:
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder}
+		}
+	case 2:
+		var fieldHash1 int64
+		var fieldHash2 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldHash1 == 0 {
+				fieldHash1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else {
+				fieldHash2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			}
+		}
+		return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2}
+	case 3:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			}
+		}
+		return &threeFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3}
+	case 4:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			}
+		}
+		return &fourFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4}
+	case 5:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			}
+		}
+		return &fiveFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5}
+	case 6:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			}
+		}
+		return &sixFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6}
+	case 7:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			}
+		}
+		return &sevenFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7}
+	case 8:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			}
+		}
+		return &eightFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8}
+	case 9:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldName9 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		var fieldDecoder9 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else if fieldName8 == 0 {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			} else {
+				fieldName9 = fieldHash
+				fieldDecoder9 = fieldDecoder
+			}
+		}
+		return &nineFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8,
+			fieldName9, fieldDecoder9}
+	case 10:
+		var fieldName1 int64
+		var fieldName2 int64
+		var fieldName3 int64
+		var fieldName4 int64
+		var fieldName5 int64
+		var fieldName6 int64
+		var fieldName7 int64
+		var fieldName8 int64
+		var fieldName9 int64
+		var fieldName10 int64
+		var fieldDecoder1 *structFieldDecoder
+		var fieldDecoder2 *structFieldDecoder
+		var fieldDecoder3 *structFieldDecoder
+		var fieldDecoder4 *structFieldDecoder
+		var fieldDecoder5 *structFieldDecoder
+		var fieldDecoder6 *structFieldDecoder
+		var fieldDecoder7 *structFieldDecoder
+		var fieldDecoder8 *structFieldDecoder
+		var fieldDecoder9 *structFieldDecoder
+		var fieldDecoder10 *structFieldDecoder
+		for fieldName, fieldDecoder := range fields {
+			fieldHash := calcHash(fieldName, ctx.caseSensitive())
+			_, known := knownHash[fieldHash]
+			if known {
+				return &generalStructDecoder{typ, fields, false}
+			}
+			knownHash[fieldHash] = struct{}{}
+			if fieldName1 == 0 {
+				fieldName1 = fieldHash
+				fieldDecoder1 = fieldDecoder
+			} else if fieldName2 == 0 {
+				fieldName2 = fieldHash
+				fieldDecoder2 = fieldDecoder
+			} else if fieldName3 == 0 {
+				fieldName3 = fieldHash
+				fieldDecoder3 = fieldDecoder
+			} else if fieldName4 == 0 {
+				fieldName4 = fieldHash
+				fieldDecoder4 = fieldDecoder
+			} else if fieldName5 == 0 {
+				fieldName5 = fieldHash
+				fieldDecoder5 = fieldDecoder
+			} else if fieldName6 == 0 {
+				fieldName6 = fieldHash
+				fieldDecoder6 = fieldDecoder
+			} else if fieldName7 == 0 {
+				fieldName7 = fieldHash
+				fieldDecoder7 = fieldDecoder
+			} else if fieldName8 == 0 {
+				fieldName8 = fieldHash
+				fieldDecoder8 = fieldDecoder
+			} else if fieldName9 == 0 {
+				fieldName9 = fieldHash
+				fieldDecoder9 = fieldDecoder
+			} else {
+				fieldName10 = fieldHash
+				fieldDecoder10 = fieldDecoder
+			}
+		}
+		return &tenFieldsStructDecoder{typ,
+			fieldName1, fieldDecoder1,
+			fieldName2, fieldDecoder2,
+			fieldName3, fieldDecoder3,
+			fieldName4, fieldDecoder4,
+			fieldName5, fieldDecoder5,
+			fieldName6, fieldDecoder6,
+			fieldName7, fieldDecoder7,
+			fieldName8, fieldDecoder8,
+			fieldName9, fieldDecoder9,
+			fieldName10, fieldDecoder10}
+	}
+	return &generalStructDecoder{typ, fields, false}
+}
+
+type generalStructDecoder struct {
+	typ                   reflect2.Type
+	fields                map[string]*structFieldDecoder
+	disallowUnknownFields bool
+}
+
+func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	var c byte
+	for c = ','; c == ','; c = iter.nextToken() {
+		decoder.decodeOneField(ptr, iter)
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+	if c != '}' {
+		iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c}))
+	}
+}
+
+func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) {
+	var field string
+	var fieldDecoder *structFieldDecoder
+	if iter.cfg.objectFieldMustBeSimpleString {
+		fieldBytes := iter.ReadStringAsSlice()
+		field = *(*string)(unsafe.Pointer(&fieldBytes))
+		fieldDecoder = decoder.fields[field]
+		if fieldDecoder == nil && !iter.cfg.caseSensitive {
+			fieldDecoder = decoder.fields[strings.ToLower(field)]
+		}
+	} else {
+		field = iter.ReadString()
+		fieldDecoder = decoder.fields[field]
+		if fieldDecoder == nil && !iter.cfg.caseSensitive {
+			fieldDecoder = decoder.fields[strings.ToLower(field)]
+		}
+	}
+	if fieldDecoder == nil {
+		msg := "found unknown field: " + field
+		if decoder.disallowUnknownFields {
+			iter.ReportError("ReadObject", msg)
+		}
+		c := iter.nextToken()
+		if c != ':' {
+			iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+		}
+		iter.Skip()
+		return
+	}
+	c := iter.nextToken()
+	if c != ':' {
+		iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
+	}
+	fieldDecoder.Decode(ptr, iter)
+}
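+
+// Illustrative sketch, not part of upstream json-iterator: with
+// DisallowUnknownFields enabled, decodeOneField above reports an error instead
+// of silently skipping a key that has no matching struct field.
+func exampleDisallowUnknownFields() error {
+	api := Config{DisallowUnknownFields: true}.Froze()
+	var out struct {
+		Known int `json:"known"`
+	}
+	// "extra" has no binding, so decoding fails with an unknown-field error.
+	return api.UnmarshalFromString(`{"known":1,"extra":2}`, &out)
+}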
+
+type skipObjectDecoder struct {
+	typ reflect2.Type
+}
+
+func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	valueType := iter.WhatIsNext()
+	if valueType != ObjectValue && valueType != NilValue {
+		iter.ReportError("skipObjectDecoder", "expect object or null")
+		return
+	}
+	iter.Skip()
+}
+
+type oneFieldStructDecoder struct {
+	typ          reflect2.Type
+	fieldHash    int64
+	fieldDecoder *structFieldDecoder
+}
+
+func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		if iter.readFieldHash() == decoder.fieldHash {
+			decoder.fieldDecoder.Decode(ptr, iter)
+		} else {
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type twoFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+}
+
+func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type threeFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+}
+
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fourFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+}
+
+func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type fiveFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+}
+
+func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sixFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+}
+
+func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type sevenFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+}
+
+func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type eightFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int64
+	fieldDecoder8 *structFieldDecoder
+}
+
+func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type nineFieldsStructDecoder struct {
+	typ           reflect2.Type
+	fieldHash1    int64
+	fieldDecoder1 *structFieldDecoder
+	fieldHash2    int64
+	fieldDecoder2 *structFieldDecoder
+	fieldHash3    int64
+	fieldDecoder3 *structFieldDecoder
+	fieldHash4    int64
+	fieldDecoder4 *structFieldDecoder
+	fieldHash5    int64
+	fieldDecoder5 *structFieldDecoder
+	fieldHash6    int64
+	fieldDecoder6 *structFieldDecoder
+	fieldHash7    int64
+	fieldDecoder7 *structFieldDecoder
+	fieldHash8    int64
+	fieldDecoder8 *structFieldDecoder
+	fieldHash9    int64
+	fieldDecoder9 *structFieldDecoder
+}
+
+func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
+type tenFieldsStructDecoder struct {
+	typ            reflect2.Type
+	fieldHash1     int64
+	fieldDecoder1  *structFieldDecoder
+	fieldHash2     int64
+	fieldDecoder2  *structFieldDecoder
+	fieldHash3     int64
+	fieldDecoder3  *structFieldDecoder
+	fieldHash4     int64
+	fieldDecoder4  *structFieldDecoder
+	fieldHash5     int64
+	fieldDecoder5  *structFieldDecoder
+	fieldHash6     int64
+	fieldDecoder6  *structFieldDecoder
+	fieldHash7     int64
+	fieldDecoder7  *structFieldDecoder
+	fieldHash8     int64
+	fieldDecoder8  *structFieldDecoder
+	fieldHash9     int64
+	fieldDecoder9  *structFieldDecoder
+	fieldHash10    int64
+	fieldDecoder10 *structFieldDecoder
+}
+
+func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	if !iter.readObjectStart() {
+		return
+	}
+	for {
+		switch iter.readFieldHash() {
+		case decoder.fieldHash1:
+			decoder.fieldDecoder1.Decode(ptr, iter)
+		case decoder.fieldHash2:
+			decoder.fieldDecoder2.Decode(ptr, iter)
+		case decoder.fieldHash3:
+			decoder.fieldDecoder3.Decode(ptr, iter)
+		case decoder.fieldHash4:
+			decoder.fieldDecoder4.Decode(ptr, iter)
+		case decoder.fieldHash5:
+			decoder.fieldDecoder5.Decode(ptr, iter)
+		case decoder.fieldHash6:
+			decoder.fieldDecoder6.Decode(ptr, iter)
+		case decoder.fieldHash7:
+			decoder.fieldDecoder7.Decode(ptr, iter)
+		case decoder.fieldHash8:
+			decoder.fieldDecoder8.Decode(ptr, iter)
+		case decoder.fieldHash9:
+			decoder.fieldDecoder9.Decode(ptr, iter)
+		case decoder.fieldHash10:
+			decoder.fieldDecoder10.Decode(ptr, iter)
+		default:
+			iter.Skip()
+		}
+		if iter.isObjectEnd() {
+			break
+		}
+	}
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error())
+	}
+}
+
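+// structFieldDecoder decodes a single struct field: it resolves the field's address
+// inside the struct and delegates to the field's value decoder, prefixing any error
+// with the field name.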
+type structFieldDecoder struct {
+	field        reflect2.StructField
+	fieldDecoder ValDecoder
+}
+
+func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	fieldPtr := decoder.field.UnsafeGet(ptr)
+	decoder.fieldDecoder.Decode(fieldPtr, iter)
+	if iter.Error != nil && iter.Error != io.EOF {
+		iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error())
+	}
+}
+
+type stringModeStringDecoder struct {
+	elemDecoder ValDecoder
+	cfg         *frozenConfig
+}
+
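+// Decode first decodes the value with the element decoder, then re-parses the decoded
+// text as a JSON string to unwrap it; this is how string fields marked with the
+// ",string" tag option are handled.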
+func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	decoder.elemDecoder.Decode(ptr, iter)
+	str := *((*string)(ptr))
+	tempIter := decoder.cfg.BorrowIterator([]byte(str))
+	defer decoder.cfg.ReturnIterator(tempIter)
+	*((*string)(ptr)) = tempIter.ReadString()
+}
+
+type stringModeNumberDecoder struct {
+	elemDecoder ValDecoder
+}
+
+func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
+	c := iter.nextToken()
+	if c != '"' {
+		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+		return
+	}
+	decoder.elemDecoder.Decode(ptr, iter)
+	if iter.Error != nil {
+		return
+	}
+	c = iter.readByte()
+	if c != '"' {
+		iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
+		return
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
new file mode 100644
index 0000000..d0759cf
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go
@@ -0,0 +1,210 @@
+package jsoniter
+
+import (
+	"fmt"
+	"github.com/modern-go/reflect2"
+	"io"
+	"reflect"
+	"unsafe"
+)
+
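+// encoderOfStruct collects the struct's field bindings in order, resolves conflicts
+// between bindings that encode to the same JSON name, drops the ignored ones, and
+// builds the final struct encoder (or an empty-object encoder if nothing remains).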
+func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder {
+	type bindingTo struct {
+		binding *Binding
+		toName  string
+		ignored bool
+	}
+	orderedBindings := []*bindingTo{}
+	structDescriptor := describeStruct(ctx, typ)
+	for _, binding := range structDescriptor.Fields {
+		for _, toName := range binding.ToNames {
+			new := &bindingTo{
+				binding: binding,
+				toName:  toName,
+			}
+			for _, old := range orderedBindings {
+				if old.toName != toName {
+					continue
+				}
+				old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding)
+			}
+			orderedBindings = append(orderedBindings, new)
+		}
+	}
+	if len(orderedBindings) == 0 {
+		return &emptyStructEncoder{}
+	}
+	finalOrderedFields := []structFieldTo{}
+	for _, bindingTo := range orderedBindings {
+		if !bindingTo.ignored {
+			finalOrderedFields = append(finalOrderedFields, structFieldTo{
+				encoder: bindingTo.binding.Encoder.(*structFieldEncoder),
+				toName:  bindingTo.toName,
+			})
+		}
+	}
+	return &structEncoder{typ, finalOrderedFields}
+}
+
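+// createCheckIsEmpty builds an encoder that is only used for its IsEmpty check (for
+// example to support omitempty); unsupported kinds fall back to a lazy error encoder.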
+func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty {
+	encoder := createEncoderOfNative(ctx, typ)
+	if encoder != nil {
+		return encoder
+	}
+	kind := typ.Kind()
+	switch kind {
+	case reflect.Interface:
+		return &dynamicEncoder{typ}
+	case reflect.Struct:
+		return &structEncoder{typ: typ}
+	case reflect.Array:
+		return &arrayEncoder{}
+	case reflect.Slice:
+		return &sliceEncoder{}
+	case reflect.Map:
+		return encoderOfMap(ctx, typ)
+	case reflect.Ptr:
+		return &OptionalEncoder{}
+	default:
+		return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)}
+	}
+}
+
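+// resolveConflictBinding decides which of two bindings mapped to the same JSON name
+// should be ignored, based on whether each field is tagged and on its embedding depth
+// (len(levels)); when two candidates tie, both are ignored.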
+func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) {
+	newTagged := new.Field.Tag().Get(cfg.getTagKey()) != ""
+	oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != ""
+	if newTagged {
+		if oldTagged {
+			if len(old.levels) > len(new.levels) {
+				return true, false
+			} else if len(new.levels) > len(old.levels) {
+				return false, true
+			} else {
+				return true, true
+			}
+		} else {
+			return true, false
+		}
+	} else {
+		if oldTagged {
+			return true, false
+		}
+		if len(old.levels) > len(new.levels) {
+			return true, false
+		} else if len(new.levels) > len(old.levels) {
+			return false, true
+		} else {
+			return true, true
+		}
+	}
+}
+
+type structFieldEncoder struct {
+	field        reflect2.StructField
+	fieldEncoder ValEncoder
+	omitempty    bool
+}
+
+func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	encoder.fieldEncoder.Encode(fieldPtr, stream)
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error())
+	}
+}
+
+func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	return encoder.fieldEncoder.IsEmpty(fieldPtr)
+}
+
+func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool {
+	isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil)
+	if !converted {
+		return false
+	}
+	fieldPtr := encoder.field.UnsafeGet(ptr)
+	return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr)
+}
+
+type IsEmbeddedPtrNil interface {
+	IsEmbeddedPtrNil(ptr unsafe.Pointer) bool
+}
+
+type structEncoder struct {
+	typ    reflect2.Type
+	fields []structFieldTo
+}
+
+type structFieldTo struct {
+	encoder *structFieldEncoder
+	toName  string
+}
+
+func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteObjectStart()
+	isNotFirst := false
+	for _, field := range encoder.fields {
+		if field.encoder.omitempty && field.encoder.IsEmpty(ptr) {
+			continue
+		}
+		if field.encoder.IsEmbeddedPtrNil(ptr) {
+			continue
+		}
+		if isNotFirst {
+			stream.WriteMore()
+		}
+		stream.WriteObjectField(field.toName)
+		field.encoder.Encode(ptr, stream)
+		isNotFirst = true
+	}
+	stream.WriteObjectEnd()
+	if stream.Error != nil && stream.Error != io.EOF {
+		stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error())
+	}
+}
+
+func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type emptyStructEncoder struct {
+}
+
+func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.WriteEmptyObject()
+}
+
+func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return false
+}
+
+type stringModeNumberEncoder struct {
+	elemEncoder ValEncoder
+}
+
+func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	stream.writeByte('"')
+	encoder.elemEncoder.Encode(ptr, stream)
+	stream.writeByte('"')
+}
+
+func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
+
+type stringModeStringEncoder struct {
+	elemEncoder ValEncoder
+	cfg         *frozenConfig
+}
+
+func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
+	tempStream := encoder.cfg.BorrowStream(nil)
+	defer encoder.cfg.ReturnStream(tempStream)
+	encoder.elemEncoder.Encode(ptr, tempStream)
+	stream.WriteString(string(tempStream.Buffer()))
+}
+
+func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool {
+	return encoder.elemEncoder.IsEmpty(ptr)
+}
diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go
new file mode 100644
index 0000000..17662fd
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream.go
@@ -0,0 +1,211 @@
+package jsoniter
+
+import (
+	"io"
+)
+
+// Stream is an io.Writer-like object with JSON-specific write functions.
+// Errors are not returned as return values; they are stored in the Error member of the stream instance.
+type Stream struct {
+	cfg        *frozenConfig
+	out        io.Writer
+	buf        []byte
+	Error      error
+	indention  int
+	Attachment interface{} // open for customized encoder
+}
+
+// NewStream creates a new stream instance.
+// cfg can be jsoniter.ConfigDefault.
+// out can be nil to write to the internal buffer only.
+// bufSize is the initial size for the internal buffer in bytes.
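+//
+// A minimal usage sketch (assuming the package-level ConfigDefault), writing to the
+// internal buffer:
+//
+//	stream := NewStream(ConfigDefault, nil, 64)
+//	stream.WriteObjectStart()
+//	stream.WriteObjectField("id")
+//	stream.WriteInt(42)
+//	stream.WriteObjectEnd()
+//	result := stream.Buffer() // {"id":42}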
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream {
+	return &Stream{
+		cfg:       cfg.(*frozenConfig),
+		out:       out,
+		buf:       make([]byte, 0, bufSize),
+		Error:     nil,
+		indention: 0,
+	}
+}
+
+// Pool returns a pool that can provide more streams with the same configuration.
+func (stream *Stream) Pool() StreamPool {
+	return stream.cfg
+}
+
+// Reset reuses this stream instance by assigning a new writer.
+func (stream *Stream) Reset(out io.Writer) {
+	stream.out = out
+	stream.buf = stream.buf[:0]
+}
+
+// Available returns how many bytes are unused in the buffer.
+func (stream *Stream) Available() int {
+	return cap(stream.buf) - len(stream.buf)
+}
+
+// Buffered returns the number of bytes that have been written into the current buffer.
+func (stream *Stream) Buffered() int {
+	return len(stream.buf)
+}
+
+// Buffer returns the internal buffer; if the writer is nil, use this method to take the result.
+func (stream *Stream) Buffer() []byte {
+	return stream.buf
+}
+
+// SetBuffer allows appending to the internal buffer directly.
+func (stream *Stream) SetBuffer(buf []byte) {
+	stream.buf = buf
+}
+
+// Write writes the contents of p into the buffer.
+// It returns the number of bytes written.
+// If nn < len(p), it also returns an error explaining
+// why the write is short.
+func (stream *Stream) Write(p []byte) (nn int, err error) {
+	stream.buf = append(stream.buf, p...)
+	if stream.out != nil {
+		nn, err = stream.out.Write(stream.buf)
+		stream.buf = stream.buf[nn:]
+		return
+	}
+	return len(p), nil
+}
+
+// writeByte writes a single byte.
+func (stream *Stream) writeByte(c byte) {
+	stream.buf = append(stream.buf, c)
+}
+
+func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) {
+	stream.buf = append(stream.buf, c1, c2)
+}
+
+func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3)
+}
+
+func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4)
+}
+
+func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) {
+	stream.buf = append(stream.buf, c1, c2, c3, c4, c5)
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
+func (stream *Stream) Flush() error {
+	if stream.out == nil {
+		return nil
+	}
+	if stream.Error != nil {
+		return stream.Error
+	}
+	n, err := stream.out.Write(stream.buf)
+	if err != nil {
+		if stream.Error == nil {
+			stream.Error = err
+		}
+		return err
+	}
+	stream.buf = stream.buf[n:]
+	return nil
+}
+
+// WriteRaw writes the string out without quotes, just like a raw []byte.
+func (stream *Stream) WriteRaw(s string) {
+	stream.buf = append(stream.buf, s...)
+}
+
+// WriteNil writes null to the stream.
+func (stream *Stream) WriteNil() {
+	stream.writeFourBytes('n', 'u', 'l', 'l')
+}
+
+// WriteTrue writes true to the stream.
+func (stream *Stream) WriteTrue() {
+	stream.writeFourBytes('t', 'r', 'u', 'e')
+}
+
+// WriteFalse writes false to the stream.
+func (stream *Stream) WriteFalse() {
+	stream.writeFiveBytes('f', 'a', 'l', 's', 'e')
+}
+
+// WriteBool writes true or false to the stream.
+func (stream *Stream) WriteBool(val bool) {
+	if val {
+		stream.WriteTrue()
+	} else {
+		stream.WriteFalse()
+	}
+}
+
+// WriteObjectStart writes { with possible indentation.
+func (stream *Stream) WriteObjectStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('{')
+	stream.writeIndention(0)
+}
+
+// WriteObjectField writes "field": with possible indentation.
+func (stream *Stream) WriteObjectField(field string) {
+	stream.WriteString(field)
+	if stream.indention > 0 {
+		stream.writeTwoBytes(':', ' ')
+	} else {
+		stream.writeByte(':')
+	}
+}
+
+// WriteObjectEnd writes } with possible indentation.
+func (stream *Stream) WriteObjectEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte('}')
+}
+
+// WriteEmptyObject writes {}.
+func (stream *Stream) WriteEmptyObject() {
+	stream.writeByte('{')
+	stream.writeByte('}')
+}
+
+// WriteMore writes , with possible indentation.
+func (stream *Stream) WriteMore() {
+	stream.writeByte(',')
+	stream.writeIndention(0)
+	stream.Flush()
+}
+
+// WriteArrayStart writes [ with possible indentation.
+func (stream *Stream) WriteArrayStart() {
+	stream.indention += stream.cfg.indentionStep
+	stream.writeByte('[')
+	stream.writeIndention(0)
+}
+
+// WriteEmptyArray writes [].
+func (stream *Stream) WriteEmptyArray() {
+	stream.writeTwoBytes('[', ']')
+}
+
+// WriteArrayEnd writes ] with possible indentation.
+func (stream *Stream) WriteArrayEnd() {
+	stream.writeIndention(stream.cfg.indentionStep)
+	stream.indention -= stream.cfg.indentionStep
+	stream.writeByte(']')
+}
+
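+// writeIndention writes a newline followed by the current indentation minus delta
+// spaces; it is a no-op when indentation is disabled (indention == 0).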
+func (stream *Stream) writeIndention(delta int) {
+	if stream.indention == 0 {
+		return
+	}
+	stream.writeByte('\n')
+	toWrite := stream.indention - delta
+	for i := 0; i < toWrite; i++ {
+		stream.buf = append(stream.buf, ' ')
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go
new file mode 100644
index 0000000..f318d2c
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_float.go
@@ -0,0 +1,94 @@
+package jsoniter
+
+import (
+	"math"
+	"strconv"
+)
+
+var pow10 []uint64
+
+func init() {
+	pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000}
+}
+
+// WriteFloat32 writes float32 to the stream.
+func (stream *Stream) WriteFloat32(val float32) {
+	abs := math.Abs(float64(val))
+	fmt := byte('f')
+	// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
+	if abs != 0 {
+		if float32(abs) < 1e-6 || float32(abs) >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32)
+}
+
+// WriteFloat32Lossy writes float32 to the stream with ONLY 6 digits of precision, but is much faster.
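+// It scales the value by 1e6, writes the integer and fractional parts separately,
+// left-pads the fraction with zeros using the pow10 table, and trims trailing zeros.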
+func (stream *Stream) WriteFloat32Lossy(val float32) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat32(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(float64(val)*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
+
+// WriteFloat64 writes float64 to the stream.
+func (stream *Stream) WriteFloat64(val float64) {
+	abs := math.Abs(val)
+	fmt := byte('f')
+	// Note: Must use float64 comparisons to get precise cutoffs right.
+	if abs != 0 {
+		if abs < 1e-6 || abs >= 1e21 {
+			fmt = 'e'
+		}
+	}
+	stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64)
+}
+
+// WriteFloat64Lossy writes float64 to the stream with ONLY 6 digits of precision, but is much faster.
+func (stream *Stream) WriteFloat64Lossy(val float64) {
+	if val < 0 {
+		stream.writeByte('-')
+		val = -val
+	}
+	if val > 0x4ffffff {
+		stream.WriteFloat64(val)
+		return
+	}
+	precision := 6
+	exp := uint64(1000000) // 6
+	lval := uint64(val*float64(exp) + 0.5)
+	stream.WriteUint64(lval / exp)
+	fval := lval % exp
+	if fval == 0 {
+		return
+	}
+	stream.writeByte('.')
+	for p := precision - 1; p > 0 && fval < pow10[p]; p-- {
+		stream.writeByte('0')
+	}
+	stream.WriteUint64(fval)
+	for stream.buf[len(stream.buf)-1] == '0' {
+		stream.buf = stream.buf[:len(stream.buf)-1]
+	}
+}
diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go
new file mode 100644
index 0000000..d1059ee
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_int.go
@@ -0,0 +1,190 @@
+package jsoniter
+
+var digits []uint32
+
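+// digits caches, for every value 0..999, its three ASCII digit bytes packed into the
+// low 24 bits, plus (in the top byte) how many leading zero digits should be dropped
+// when the value starts a number.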
+func init() {
+	digits = make([]uint32, 1000)
+	for i := uint32(0); i < 1000; i++ {
+		digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0'
+		if i < 10 {
+			digits[i] += 2 << 24
+		} else if i < 100 {
+			digits[i] += 1 << 24
+		}
+	}
+}
+
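+// writeFirstBuf writes the most significant base-1000 chunk of a number, dropping the
+// leading zero digits encoded in the top byte of the packed value; writeBuf writes a
+// full three-digit chunk. The WriteUint* methods below emit numbers as base-1000
+// chunks using these helpers.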
+func writeFirstBuf(space []byte, v uint32) []byte {
+	start := v >> 24
+	if start == 0 {
+		space = append(space, byte(v>>16), byte(v>>8))
+	} else if start == 1 {
+		space = append(space, byte(v>>8))
+	}
+	space = append(space, byte(v))
+	return space
+}
+
+func writeBuf(buf []byte, v uint32) []byte {
+	return append(buf, byte(v>>16), byte(v>>8), byte(v))
+}
+
+// WriteUint8 writes uint8 to the stream.
+func (stream *Stream) WriteUint8(val uint8) {
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteInt8 writes int8 to the stream.
+func (stream *Stream) WriteInt8(nval int8) {
+	var val uint8
+	if nval < 0 {
+		val = uint8(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint8(nval)
+	}
+	stream.buf = writeFirstBuf(stream.buf, digits[val])
+}
+
+// WriteUint16 writes uint16 to the stream.
+func (stream *Stream) WriteUint16(val uint16) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	stream.buf = writeFirstBuf(stream.buf, digits[q1])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+	return
+}
+
+// WriteInt16 writes int16 to the stream.
+func (stream *Stream) WriteInt16(nval int16) {
+	var val uint16
+	if nval < 0 {
+		val = uint16(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint16(nval)
+	}
+	stream.WriteUint16(val)
+}
+
+// WriteUint32 writes uint32 to the stream.
+func (stream *Stream) WriteUint32(val uint32) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+	} else {
+		r3 := q2 - q3*1000
+		stream.buf = append(stream.buf, byte(q3+'0'))
+		stream.buf = writeBuf(stream.buf, digits[r3])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt32 writes int32 to the stream.
+func (stream *Stream) WriteInt32(nval int32) {
+	var val uint32
+	if nval < 0 {
+		val = uint32(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint32(nval)
+	}
+	stream.WriteUint32(val)
+}
+
+// WriteUint64 writes uint64 to the stream.
+func (stream *Stream) WriteUint64(val uint64) {
+	q1 := val / 1000
+	if q1 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[val])
+		return
+	}
+	r1 := val - q1*1000
+	q2 := q1 / 1000
+	if q2 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q1])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r2 := q1 - q2*1000
+	q3 := q2 / 1000
+	if q3 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q2])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r3 := q2 - q3*1000
+	q4 := q3 / 1000
+	if q4 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q3])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r4 := q3 - q4*1000
+	q5 := q4 / 1000
+	if q5 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q4])
+		stream.buf = writeBuf(stream.buf, digits[r4])
+		stream.buf = writeBuf(stream.buf, digits[r3])
+		stream.buf = writeBuf(stream.buf, digits[r2])
+		stream.buf = writeBuf(stream.buf, digits[r1])
+		return
+	}
+	r5 := q4 - q5*1000
+	q6 := q5 / 1000
+	if q6 == 0 {
+		stream.buf = writeFirstBuf(stream.buf, digits[q5])
+	} else {
+		stream.buf = writeFirstBuf(stream.buf, digits[q6])
+		r6 := q5 - q6*1000
+		stream.buf = writeBuf(stream.buf, digits[r6])
+	}
+	stream.buf = writeBuf(stream.buf, digits[r5])
+	stream.buf = writeBuf(stream.buf, digits[r4])
+	stream.buf = writeBuf(stream.buf, digits[r3])
+	stream.buf = writeBuf(stream.buf, digits[r2])
+	stream.buf = writeBuf(stream.buf, digits[r1])
+}
+
+// WriteInt64 writes int64 to the stream.
+func (stream *Stream) WriteInt64(nval int64) {
+	var val uint64
+	if nval < 0 {
+		val = uint64(-nval)
+		stream.buf = append(stream.buf, '-')
+	} else {
+		val = uint64(nval)
+	}
+	stream.WriteUint64(val)
+}
+
+// WriteInt writes int to the stream.
+func (stream *Stream) WriteInt(val int) {
+	stream.WriteInt64(int64(val))
+}
+
+// WriteUint writes uint to the stream.
+func (stream *Stream) WriteUint(val uint) {
+	stream.WriteUint64(uint64(val))
+}
diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go
new file mode 100644
index 0000000..54c2ba0
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/stream_str.go
@@ -0,0 +1,372 @@
+package jsoniter
+
+import (
+	"unicode/utf8"
+)
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+var hex = "0123456789abcdef"
+
+// WriteStringWithHTMLEscaped writes the string to the stream with HTML special characters escaped.
+func (stream *Stream) WriteStringWithHTMLEscaped(s string) {
+	valLen := len(s)
+	stream.buf = append(stream.buf, '"')
+	// write string, the fast path, without utf8 and escape support
+	i := 0
+	for ; i < valLen; i++ {
+		c := s[i]
+		if c < utf8.RuneSelf && htmlSafeSet[c] {
+			stream.buf = append(stream.buf, c)
+		} else {
+			break
+		}
+	}
+	if i == valLen {
+		stream.buf = append(stream.buf, '"')
+		return
+	}
+	writeStringSlowPathWithHTMLEscaped(stream, i, s, valLen)
+}
+
+func writeStringSlowPathWithHTMLEscaped(stream *Stream, i int, s string, valLen int) {
+	start := i
+	// for the remaining parts, we process them char by char
+	for i < valLen {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				stream.writeTwoBytes('\\', b)
+			case '\n':
+				stream.writeTwoBytes('\\', 'n')
+			case '\r':
+				stream.writeTwoBytes('\\', 'r')
+			case '\t':
+				stream.writeTwoBytes('\\', 't')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				stream.WriteRaw(`\u00`)
+				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			stream.WriteRaw(`\ufffd`)
+			i++
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			stream.WriteRaw(`\u202`)
+			stream.writeByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		stream.WriteRaw(s[start:])
+	}
+	stream.writeByte('"')
+}
+
+// WriteString writes the string to the stream without HTML escaping.
+func (stream *Stream) WriteString(s string) {
+	valLen := len(s)
+	stream.buf = append(stream.buf, '"')
+	// write string, the fast path, without utf8 and escape support
+	i := 0
+	for ; i < valLen; i++ {
+		c := s[i]
+		if c > 31 && c != '"' && c != '\\' {
+			stream.buf = append(stream.buf, c)
+		} else {
+			break
+		}
+	}
+	if i == valLen {
+		stream.buf = append(stream.buf, '"')
+		return
+	}
+	writeStringSlowPath(stream, i, s, valLen)
+}
+
+func writeStringSlowPath(stream *Stream, i int, s string, valLen int) {
+	start := i
+	// for the remaining parts, we process them char by char
+	for i < valLen {
+		if b := s[i]; b < utf8.RuneSelf {
+			if safeSet[b] {
+				i++
+				continue
+			}
+			if start < i {
+				stream.WriteRaw(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				stream.writeTwoBytes('\\', b)
+			case '\n':
+				stream.writeTwoBytes('\\', 'n')
+			case '\r':
+				stream.writeTwoBytes('\\', 'r')
+			case '\t':
+				stream.writeTwoBytes('\\', 't')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				stream.WriteRaw(`\u00`)
+				stream.writeTwoBytes(hex[b>>4], hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		i++
+		continue
+	}
+	if start < len(s) {
+		stream.WriteRaw(s[start:])
+	}
+	stream.writeByte('"')
+}
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
new file mode 100755
index 0000000..f4e7c0b
--- /dev/null
+++ b/vendor/github.com/json-iterator/go/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/json-iterator/go $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/modern-go/concurrent/.gitignore b/vendor/github.com/modern-go/concurrent/.gitignore
new file mode 100644
index 0000000..3f2bc47
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.gitignore
@@ -0,0 +1 @@
+/coverage.txt
diff --git a/vendor/github.com/modern-go/concurrent/.travis.yml b/vendor/github.com/modern-go/concurrent/.travis.yml
new file mode 100644
index 0000000..449e67c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/concurrent/LICENSE b/vendor/github.com/modern-go/concurrent/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/modern-go/concurrent/README.md b/vendor/github.com/modern-go/concurrent/README.md
new file mode 100644
index 0000000..acab320
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/README.md
@@ -0,0 +1,49 @@
+# concurrent
+
+[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/concurrent/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/concurrent?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/concurrent)
+[![Build Status](https://travis-ci.org/modern-go/concurrent.svg?branch=master)](https://travis-ci.org/modern-go/concurrent)
+[![codecov](https://codecov.io/gh/modern-go/concurrent/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/concurrent)
+[![rcard](https://goreportcard.com/badge/github.com/modern-go/concurrent)](https://goreportcard.com/report/github.com/modern-go/concurrent)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/concurrent/master/LICENSE)
+
+* concurrent.Map: a backport of sync.Map for Go below 1.9
+* concurrent.Executor: goroutines with explicit ownership that can be cancelled
+
+# concurrent.Map
+
+Because sync.Map is only available in Go 1.9 and later, concurrent.Map can be used to keep code portable.
+
+```go
+m := concurrent.NewMap()
+m.Store("hello", "world")
+elem, found := m.Load("hello")
+// elem will be "world"
+// found will be true
+```
+
+# concurrent.Executor
+
+```go
+executor := concurrent.NewUnboundedExecutor()
+executor.Go(func(ctx context.Context) {
+    everyMillisecond := time.NewTicker(time.Millisecond)
+    for {
+        select {
+        case <-ctx.Done():
+            fmt.Println("goroutine exited")
+            return
+        case <-everyMillisecond.C:
+            // do something
+        }
+    }
+})
+time.Sleep(time.Second)
+executor.StopAndWaitForever()
+fmt.Println("executor stopped")
+```
+
+attach goroutines to an executor instance, so that we can
+
+* cancel them by stopping the executor with Stop/StopAndWait/StopAndWaitForever
+* handle panics via a callback: the default behavior will no longer crash your application
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/executor.go b/vendor/github.com/modern-go/concurrent/executor.go
new file mode 100644
index 0000000..623dba1
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/executor.go
@@ -0,0 +1,14 @@
+package concurrent
+
+import "context"
+
+// Executor replaces the go keyword for starting a new goroutine.
+// The goroutine should cancel itself if the context passed in has been cancelled.
+// A goroutine started by the executor is owned by the executor,
+// so we can cancel all goroutines owned by the executor just by stopping the executor itself.
+// However, the Executor interface does not have a Stop method; the party starting and owning
+// the executor should use the concrete executor type instead of this interface.
+type Executor interface {
+	// Go starts a new goroutine controlled by the context
+	Go(handler func(ctx context.Context))
+}
diff --git a/vendor/github.com/modern-go/concurrent/go_above_19.go b/vendor/github.com/modern-go/concurrent/go_above_19.go
new file mode 100644
index 0000000..aeabf8c
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_above_19.go
@@ -0,0 +1,15 @@
+//+build go1.9
+
+package concurrent
+
+import "sync"
+
+// Map is a wrapper for sync.Map, which was introduced in Go 1.9.
+type Map struct {
+	sync.Map
+}
+
+// NewMap creates a thread safe Map
+func NewMap() *Map {
+	return &Map{}
+}
diff --git a/vendor/github.com/modern-go/concurrent/go_below_19.go b/vendor/github.com/modern-go/concurrent/go_below_19.go
new file mode 100644
index 0000000..b9c8df7
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/go_below_19.go
@@ -0,0 +1,33 @@
+//+build !go1.9
+
+package concurrent
+
+import "sync"
+
+// Map implements a thread-safe map for Go versions below 1.9 using a mutex.
+type Map struct {
+	lock sync.RWMutex
+	data map[interface{}]interface{}
+}
+
+// NewMap creates a thread safe map
+func NewMap() *Map {
+	return &Map{
+		data: make(map[interface{}]interface{}, 32),
+	}
+}
+
+// Load is the same as sync.Map's Load.
+func (m *Map) Load(key interface{}) (elem interface{}, found bool) {
+	m.lock.RLock()
+	elem, found = m.data[key]
+	m.lock.RUnlock()
+	return
+}
+
+// Store is the same as sync.Map's Store.
+func (m *Map) Store(key interface{}, elem interface{}) {
+	m.lock.Lock()
+	m.data[key] = elem
+	m.lock.Unlock()
+}
diff --git a/vendor/github.com/modern-go/concurrent/log.go b/vendor/github.com/modern-go/concurrent/log.go
new file mode 100644
index 0000000..9756fcc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/log.go
@@ -0,0 +1,13 @@
+package concurrent
+
+import (
+	"io/ioutil"
+	"log"
+	"os"
+)
+
+// ErrorLogger is used to print out errors; it can be set to a writer other than stderr.
+var ErrorLogger = log.New(os.Stderr, "", 0)
+
+// InfoLogger is used to print informational messages; it defaults to off (discarded).
+var InfoLogger = log.New(ioutil.Discard, "", 0)
\ No newline at end of file
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
new file mode 100755
index 0000000..d1e6b2e
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/concurrent $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/modern-go/concurrent/unbounded_executor.go b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
new file mode 100644
index 0000000..05a77dc
--- /dev/null
+++ b/vendor/github.com/modern-go/concurrent/unbounded_executor.go
@@ -0,0 +1,119 @@
+package concurrent
+
+import (
+	"context"
+	"fmt"
+	"runtime"
+	"runtime/debug"
+	"sync"
+	"time"
+	"reflect"
+)
+
+// HandlePanic logs goroutine panics by default.
+var HandlePanic = func(recovered interface{}, funcName string) {
+	ErrorLogger.Println(fmt.Sprintf("%s panic: %v", funcName, recovered))
+	ErrorLogger.Println(string(debug.Stack()))
+}
+
+// UnboundedExecutor is an executor without a limit on the number of alive goroutines.
+// It tracks the goroutines it starts and can cancel them on shutdown.
+type UnboundedExecutor struct {
+	ctx                   context.Context
+	cancel                context.CancelFunc
+	activeGoroutinesMutex *sync.Mutex
+	activeGoroutines      map[string]int
+	HandlePanic           func(recovered interface{}, funcName string)
+}
+
+// GlobalUnboundedExecutor has the life cycle of the program itself.
+// Any goroutine that needs to be shut down before main exits can be started from this executor.
+// GlobalUnboundedExecutor expects the main function to call Stop;
+// it does not magically know when the main function exits.
+var GlobalUnboundedExecutor = NewUnboundedExecutor()
+
+// NewUnboundedExecutor creates a new UnboundedExecutor;
+// an UnboundedExecutor cannot be created with &UnboundedExecutor{}.
+// HandlePanic can be set to a callback to override the global HandlePanic.
+func NewUnboundedExecutor() *UnboundedExecutor {
+	ctx, cancel := context.WithCancel(context.TODO())
+	return &UnboundedExecutor{
+		ctx:                   ctx,
+		cancel:                cancel,
+		activeGoroutinesMutex: &sync.Mutex{},
+		activeGoroutines:      map[string]int{},
+	}
+}
+
+// Go starts a new goroutine and tracks its lifecycle.
+// A panic will be recovered and logged automatically, except for StopSignal.
+func (executor *UnboundedExecutor) Go(handler func(ctx context.Context)) {
+	pc := reflect.ValueOf(handler).Pointer()
+	f := runtime.FuncForPC(pc)
+	funcName := f.Name()
+	file, line := f.FileLine(pc)
+	executor.activeGoroutinesMutex.Lock()
+	defer executor.activeGoroutinesMutex.Unlock()
+	startFrom := fmt.Sprintf("%s:%d", file, line)
+	executor.activeGoroutines[startFrom] += 1
+	go func() {
+		defer func() {
+			recovered := recover()
+			// if you want to quit a goroutine without trigger HandlePanic
+			// use runtime.Goexit() to quit
+			if recovered != nil {
+				if executor.HandlePanic == nil {
+					HandlePanic(recovered, funcName)
+				} else {
+					executor.HandlePanic(recovered, funcName)
+				}
+			}
+			executor.activeGoroutinesMutex.Lock()
+			executor.activeGoroutines[startFrom] -= 1
+			executor.activeGoroutinesMutex.Unlock()
+		}()
+		handler(executor.ctx)
+	}()
+}
+
+// Stop cancels all goroutines started by this executor without waiting.
+func (executor *UnboundedExecutor) Stop() {
+	executor.cancel()
+}
+
+// StopAndWaitForever cancels all goroutines started by this executor and
+// waits until all goroutines have exited.
+func (executor *UnboundedExecutor) StopAndWaitForever() {
+	executor.StopAndWait(context.Background())
+}
+
+// StopAndWait cancels all goroutines started by this executor and waits.
+// The wait can be cancelled by the context passed in.
+func (executor *UnboundedExecutor) StopAndWait(ctx context.Context) {
+	executor.cancel()
+	for {
+		oneHundredMilliseconds := time.NewTimer(time.Millisecond * 100)
+		select {
+		case <-oneHundredMilliseconds.C:
+			if executor.checkNoActiveGoroutines() {
+				return
+			}
+		case <-ctx.Done():
+			return
+		}
+	}
+}
+
+func (executor *UnboundedExecutor) checkNoActiveGoroutines() bool {
+	executor.activeGoroutinesMutex.Lock()
+	defer executor.activeGoroutinesMutex.Unlock()
+	for startFrom, count := range executor.activeGoroutines {
+		if count > 0 {
+			InfoLogger.Println("UnboundedExecutor is still waiting goroutines to quit",
+				"startFrom", startFrom,
+				"count", count)
+			return false
+		}
+	}
+	return true
+}
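+
+// Usage sketch (editorial illustration, not part of the upstream source).
+// It shows starting a tracked goroutine, overriding the panic handler on a
+// single executor, and shutting down with a bounded wait; the one-second
+// timeout is an arbitrary example value.
+//
+//	executor := concurrent.NewUnboundedExecutor()
+//	executor.HandlePanic = func(recovered interface{}, funcName string) {
+//		concurrent.ErrorLogger.Println("panic in", funcName, ":", recovered)
+//	}
+//	executor.Go(func(ctx context.Context) {
+//		for {
+//			select {
+//			case <-ctx.Done():
+//				return // Stop/StopAndWait cancelled the shared context
+//			case <-time.After(100 * time.Millisecond):
+//				// periodic work
+//			}
+//		}
+//	})
+//	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+//	defer cancel()
+//	executor.StopAndWait(ctx)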
diff --git a/vendor/github.com/modern-go/reflect2/.gitignore b/vendor/github.com/modern-go/reflect2/.gitignore
new file mode 100644
index 0000000..7b26c94
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.gitignore
@@ -0,0 +1,2 @@
+/vendor
+/coverage.txt
diff --git a/vendor/github.com/modern-go/reflect2/.travis.yml b/vendor/github.com/modern-go/reflect2/.travis.yml
new file mode 100644
index 0000000..fbb4374
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+
+go:
+  - 1.8.x
+  - 1.x
+
+before_install:
+  - go get -t -v ./...
+  - go get -t -v github.com/modern-go/reflect2-tests/...
+
+script:
+  - ./test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.lock b/vendor/github.com/modern-go/reflect2/Gopkg.lock
new file mode 100644
index 0000000..2a3a698
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.lock
@@ -0,0 +1,15 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  name = "github.com/modern-go/concurrent"
+  packages = ["."]
+  revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
+  version = "1.0.0"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  inputs-digest = "daee8a88b3498b61c5640056665b8b9eea062006f5e596bbb6a3ed9119a11ec7"
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/modern-go/reflect2/Gopkg.toml b/vendor/github.com/modern-go/reflect2/Gopkg.toml
new file mode 100644
index 0000000..2f4f4db
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/Gopkg.toml
@@ -0,0 +1,35 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+#   name = "github.com/user/project"
+#   version = "1.0.0"
+#
+# [[constraint]]
+#   name = "github.com/user/project2"
+#   branch = "dev"
+#   source = "github.com/myfork/project2"
+#
+# [[override]]
+#   name = "github.com/x/y"
+#   version = "2.4.0"
+#
+# [prune]
+#   non-go = false
+#   go-tests = true
+#   unused-packages = true
+
+ignored = []
+
+[[constraint]]
+  name = "github.com/modern-go/concurrent"
+  version = "1.0.0"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/modern-go/reflect2/LICENSE b/vendor/github.com/modern-go/reflect2/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/modern-go/reflect2/README.md b/vendor/github.com/modern-go/reflect2/README.md
new file mode 100644
index 0000000..6f968aa
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/README.md
@@ -0,0 +1,71 @@
+# reflect2
+
+[![Sourcegraph](https://sourcegraph.com/github.com/modern-go/reflect2/-/badge.svg)](https://sourcegraph.com/github.com/modern-go/reflect2?badge)
+[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/modern-go/reflect2)
+[![Build Status](https://travis-ci.org/modern-go/reflect2.svg?branch=master)](https://travis-ci.org/modern-go/reflect2)
+[![codecov](https://codecov.io/gh/modern-go/reflect2/branch/master/graph/badge.svg)](https://codecov.io/gh/modern-go/reflect2)
+[![rcard](https://goreportcard.com/badge/github.com/modern-go/reflect2)](https://goreportcard.com/report/github.com/modern-go/reflect2)
+[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://raw.githubusercontent.com/modern-go/reflect2/master/LICENSE)
+
+reflect api that avoids runtime reflect.Value cost
+
+* reflect get/set interface{}, with type checking
+* reflect get/set unsafe.Pointer, without type checking
+* `reflect2.TypeByName` works like `Class.forName` found in Java
+
+[json-iterator](https://github.com/json-iterator/go) uses this package to save runtime dispatching cost.
+This package is designed for low-level libraries that need to optimize reflection performance.
+General applications should still use the standard reflect library.
+
+# reflect2.TypeByName
+
+```go
+// given package is github.com/your/awesome-package
+type MyStruct struct {
+	// ...
+}
+
+// will return the type
+reflect2.TypeByName("awesome-package.MyStruct")
+// however, if the type has not been used,
+// it will be eliminated by the compiler, so we can not get it at runtime
+```
+
+# reflect2 get/set interface{}
+
+```go
+valType := reflect2.TypeOf(1)
+i := 1
+j := 10
+valType.Set(&i, &j)
+// i will be 10
+```
+
+to get or set a value of type `type`, always pass its pointer `*type`
+
+# reflect2 get/set unsafe.Pointer
+
+```go
+valType := reflect2.TypeOf(1)
+i := 1
+j := 10
+valType.UnsafeSet(unsafe.Pointer(&i), unsafe.Pointer(&j))
+// i will be 10
+```
+
+to get or set a value of type `type`, always pass its pointer `*type`
+
+# benchmark
+
+A benchmark is not necessary for this package, as it does almost nothing itself.
+It is just a thin wrapper that makes Go runtime internals public.
+Both `reflect2` and `reflect` call the same functions
+provided by the `runtime` package exposed by the Go language.
+
+# unsafe safety
+
+Instead of casting `[]byte` to `sliceHeader` in your application using unsafe,
+use reflect2. This way, if `sliceHeader` changes in the future,
+only reflect2 needs to be upgraded.
+
+reflect2 tries its best to keep its implementation the same as reflect (verified by testing).
\ No newline at end of file
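+
+A minimal sketch (added for illustration, not upstream content) of using the
+`UnsafeCastString` helper from this package instead of building a
+`reflect.SliceHeader` by hand:
+
+```go
+str := "hello"
+// zero-copy view of the string's backing bytes; treat them as read-only
+bytes := reflect2.UnsafeCastString(str)
+fmt.Println(len(bytes)) // 5
+```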
diff --git a/vendor/github.com/modern-go/reflect2/go_above_17.go b/vendor/github.com/modern-go/reflect2/go_above_17.go
new file mode 100644
index 0000000..5c1cea8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_17.go
@@ -0,0 +1,8 @@
+//+build go1.7
+
+package reflect2
+
+import "unsafe"
+
+//go:linkname resolveTypeOff reflect.resolveTypeOff
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
diff --git a/vendor/github.com/modern-go/reflect2/go_above_19.go b/vendor/github.com/modern-go/reflect2/go_above_19.go
new file mode 100644
index 0000000..c7e3b78
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_above_19.go
@@ -0,0 +1,14 @@
+//+build go1.9
+
+package reflect2
+
+import (
+	"unsafe"
+)
+
+//go:linkname makemap reflect.makemap
+func makemap(rtype unsafe.Pointer, cap int) (m unsafe.Pointer)
+
+func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
+	return makemap(rtype, cap)
+}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_17.go b/vendor/github.com/modern-go/reflect2/go_below_17.go
new file mode 100644
index 0000000..65a93c8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_17.go
@@ -0,0 +1,9 @@
+//+build !go1.7
+
+package reflect2
+
+import "unsafe"
+
+func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer {
+	return nil
+}
diff --git a/vendor/github.com/modern-go/reflect2/go_below_19.go b/vendor/github.com/modern-go/reflect2/go_below_19.go
new file mode 100644
index 0000000..b050ef7
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/go_below_19.go
@@ -0,0 +1,14 @@
+//+build !go1.9
+
+package reflect2
+
+import (
+	"unsafe"
+)
+
+//go:linkname makemap reflect.makemap
+func makemap(rtype unsafe.Pointer) (m unsafe.Pointer)
+
+func makeMapWithSize(rtype unsafe.Pointer, cap int) unsafe.Pointer {
+	return makemap(rtype)
+}
diff --git a/vendor/github.com/modern-go/reflect2/reflect2.go b/vendor/github.com/modern-go/reflect2/reflect2.go
new file mode 100644
index 0000000..63b49c7
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2.go
@@ -0,0 +1,298 @@
+package reflect2
+
+import (
+	"github.com/modern-go/concurrent"
+	"reflect"
+	"unsafe"
+)
+
+type Type interface {
+	Kind() reflect.Kind
+	// New return pointer to data of this type
+	New() interface{}
+	// UnsafeNew return the allocated space pointed by unsafe.Pointer
+	UnsafeNew() unsafe.Pointer
+	// PackEFace cast a unsafe pointer to object represented pointer
+	PackEFace(ptr unsafe.Pointer) interface{}
+	// Indirect dereference object represented pointer to this type
+	Indirect(obj interface{}) interface{}
+	// UnsafeIndirect dereference pointer to this type
+	UnsafeIndirect(ptr unsafe.Pointer) interface{}
+	// Type1 returns reflect.Type
+	Type1() reflect.Type
+	Implements(thatType Type) bool
+	String() string
+	RType() uintptr
+	// interface{} of this type has pointer like behavior
+	LikePtr() bool
+	IsNullable() bool
+	IsNil(obj interface{}) bool
+	UnsafeIsNil(ptr unsafe.Pointer) bool
+	Set(obj interface{}, val interface{})
+	UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer)
+	AssignableTo(anotherType Type) bool
+}
+
+type ListType interface {
+	Type
+	Elem() Type
+	SetIndex(obj interface{}, index int, elem interface{})
+	UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer)
+	GetIndex(obj interface{}, index int) interface{}
+	UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer
+}
+
+type ArrayType interface {
+	ListType
+	Len() int
+}
+
+type SliceType interface {
+	ListType
+	MakeSlice(length int, cap int) interface{}
+	UnsafeMakeSlice(length int, cap int) unsafe.Pointer
+	Grow(obj interface{}, newLength int)
+	UnsafeGrow(ptr unsafe.Pointer, newLength int)
+	Append(obj interface{}, elem interface{})
+	UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer)
+	LengthOf(obj interface{}) int
+	UnsafeLengthOf(ptr unsafe.Pointer) int
+	SetNil(obj interface{})
+	UnsafeSetNil(ptr unsafe.Pointer)
+	Cap(obj interface{}) int
+	UnsafeCap(ptr unsafe.Pointer) int
+}
+
+type StructType interface {
+	Type
+	NumField() int
+	Field(i int) StructField
+	FieldByName(name string) StructField
+	FieldByIndex(index []int) StructField
+	FieldByNameFunc(match func(string) bool) StructField
+}
+
+type StructField interface {
+	Offset() uintptr
+	Name() string
+	PkgPath() string
+	Type() Type
+	Tag() reflect.StructTag
+	Index() []int
+	Anonymous() bool
+	Set(obj interface{}, value interface{})
+	UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer)
+	Get(obj interface{}) interface{}
+	UnsafeGet(obj unsafe.Pointer) unsafe.Pointer
+}
+
+type MapType interface {
+	Type
+	Key() Type
+	Elem() Type
+	MakeMap(cap int) interface{}
+	UnsafeMakeMap(cap int) unsafe.Pointer
+	SetIndex(obj interface{}, key interface{}, elem interface{})
+	UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer)
+	TryGetIndex(obj interface{}, key interface{}) (interface{}, bool)
+	GetIndex(obj interface{}, key interface{}) interface{}
+	UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer
+	Iterate(obj interface{}) MapIterator
+	UnsafeIterate(obj unsafe.Pointer) MapIterator
+}
+
+type MapIterator interface {
+	HasNext() bool
+	Next() (key interface{}, elem interface{})
+	UnsafeNext() (key unsafe.Pointer, elem unsafe.Pointer)
+}
+
+type PtrType interface {
+	Type
+	Elem() Type
+}
+
+type InterfaceType interface {
+	NumMethod() int
+}
+
+type Config struct {
+	UseSafeImplementation bool
+}
+
+type API interface {
+	TypeOf(obj interface{}) Type
+	Type2(type1 reflect.Type) Type
+}
+
+var ConfigUnsafe = Config{UseSafeImplementation: false}.Froze()
+var ConfigSafe = Config{UseSafeImplementation: true}.Froze()
+
+type frozenConfig struct {
+	useSafeImplementation bool
+	cache                 *concurrent.Map
+}
+
+func (cfg Config) Froze() *frozenConfig {
+	return &frozenConfig{
+		useSafeImplementation: cfg.UseSafeImplementation,
+		cache: concurrent.NewMap(),
+	}
+}
+
+func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
+	cacheKey := uintptr(unpackEFace(obj).rtype)
+	typeObj, found := cfg.cache.Load(cacheKey)
+	if found {
+		return typeObj.(Type)
+	}
+	return cfg.Type2(reflect.TypeOf(obj))
+}
+
+func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
+	if type1 == nil {
+		return nil
+	}
+	cacheKey := uintptr(unpackEFace(type1).data)
+	typeObj, found := cfg.cache.Load(cacheKey)
+	if found {
+		return typeObj.(Type)
+	}
+	type2 := cfg.wrapType(type1)
+	cfg.cache.Store(cacheKey, type2)
+	return type2
+}
+
+func (cfg *frozenConfig) wrapType(type1 reflect.Type) Type {
+	safeType := safeType{Type: type1, cfg: cfg}
+	switch type1.Kind() {
+	case reflect.Struct:
+		if cfg.useSafeImplementation {
+			return &safeStructType{safeType}
+		}
+		return newUnsafeStructType(cfg, type1)
+	case reflect.Array:
+		if cfg.useSafeImplementation {
+			return &safeSliceType{safeType}
+		}
+		return newUnsafeArrayType(cfg, type1)
+	case reflect.Slice:
+		if cfg.useSafeImplementation {
+			return &safeSliceType{safeType}
+		}
+		return newUnsafeSliceType(cfg, type1)
+	case reflect.Map:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		return newUnsafeMapType(cfg, type1)
+	case reflect.Ptr, reflect.Chan, reflect.Func:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		return newUnsafePtrType(cfg, type1)
+	case reflect.Interface:
+		if cfg.useSafeImplementation {
+			return &safeMapType{safeType}
+		}
+		if type1.NumMethod() == 0 {
+			return newUnsafeEFaceType(cfg, type1)
+		}
+		return newUnsafeIFaceType(cfg, type1)
+	default:
+		if cfg.useSafeImplementation {
+			return &safeType
+		}
+		return newUnsafeType(cfg, type1)
+	}
+}
+
+func TypeOf(obj interface{}) Type {
+	return ConfigUnsafe.TypeOf(obj)
+}
+
+func TypeOfPtr(obj interface{}) PtrType {
+	return TypeOf(obj).(PtrType)
+}
+
+func Type2(type1 reflect.Type) Type {
+	if type1 == nil {
+		return nil
+	}
+	return ConfigUnsafe.Type2(type1)
+}
+
+func PtrTo(typ Type) Type {
+	return Type2(reflect.PtrTo(typ.Type1()))
+}
+
+func PtrOf(obj interface{}) unsafe.Pointer {
+	return unpackEFace(obj).data
+}
+
+func RTypeOf(obj interface{}) uintptr {
+	return uintptr(unpackEFace(obj).rtype)
+}
+
+func IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	return unpackEFace(obj).data == nil
+}
+
+func IsNullable(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func, reflect.Slice, reflect.Interface:
+		return true
+	}
+	return false
+}
+
+func likePtrKind(kind reflect.Kind) bool {
+	switch kind {
+	case reflect.Ptr, reflect.Map, reflect.Chan, reflect.Func:
+		return true
+	}
+	return false
+}
+
+func likePtrType(typ reflect.Type) bool {
+	if likePtrKind(typ.Kind()) {
+		return true
+	}
+	if typ.Kind() == reflect.Struct {
+		if typ.NumField() != 1 {
+			return false
+		}
+		return likePtrType(typ.Field(0).Type)
+	}
+	if typ.Kind() == reflect.Array {
+		if typ.Len() != 1 {
+			return false
+		}
+		return likePtrType(typ.Elem())
+	}
+	return false
+}
+
+// NoEscape hides a pointer from escape analysis.  noescape is
+// the identity function but escape analysis doesn't think the
+// output depends on the input.  noescape is inlined and currently
+// compiles down to zero instructions.
+// USE CAREFULLY!
+//go:nosplit
+func NoEscape(p unsafe.Pointer) unsafe.Pointer {
+	x := uintptr(p)
+	return unsafe.Pointer(x ^ 0)
+}
+
+func UnsafeCastString(str string) []byte {
+	stringHeader := (*reflect.StringHeader)(unsafe.Pointer(&str))
+	sliceHeader := &reflect.SliceHeader{
+		Data: stringHeader.Data,
+		Cap: stringHeader.Len,
+		Len: stringHeader.Len,
+	}
+	return *(*[]byte)(unsafe.Pointer(sliceHeader))
+}
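+
+// Usage sketch (editorial illustration, not part of the upstream source):
+// the package-level TypeOf uses ConfigUnsafe; callers that prefer the pure
+// reflection implementation can freeze their own safe config (or use ConfigSafe).
+//
+//	safeAPI := reflect2.Config{UseSafeImplementation: true}.Froze()
+//	valType := safeAPI.TypeOf(1)
+//	i, j := 1, 10
+//	valType.Set(&i, &j) // i becomes 10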
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_amd64.s b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_amd64.s
diff --git a/vendor/github.com/modern-go/reflect2/reflect2_kind.go b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
new file mode 100644
index 0000000..62f299e
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/reflect2_kind.go
@@ -0,0 +1,30 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// DefaultTypeOfKind returns the non-aliased default type for the kind
+func DefaultTypeOfKind(kind reflect.Kind) Type {
+	return kindTypes[kind]
+}
+
+var kindTypes = map[reflect.Kind]Type{
+	reflect.Bool:          TypeOf(true),
+	reflect.Uint8:         TypeOf(uint8(0)),
+	reflect.Int8:          TypeOf(int8(0)),
+	reflect.Uint16:        TypeOf(uint16(0)),
+	reflect.Int16:         TypeOf(int16(0)),
+	reflect.Uint32:        TypeOf(uint32(0)),
+	reflect.Int32:         TypeOf(int32(0)),
+	reflect.Uint64:        TypeOf(uint64(0)),
+	reflect.Int64:         TypeOf(int64(0)),
+	reflect.Uint:          TypeOf(uint(0)),
+	reflect.Int:           TypeOf(int(0)),
+	reflect.Float32:       TypeOf(float32(0)),
+	reflect.Float64:       TypeOf(float64(0)),
+	reflect.Uintptr:       TypeOf(uintptr(0)),
+	reflect.String:        TypeOf(""),
+	reflect.UnsafePointer: TypeOf(unsafe.Pointer(nil)),
+}
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_386.s b/vendor/github.com/modern-go/reflect2/relfect2_386.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_386.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_amd64p32.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm.s b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_arm64.s b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_arm64.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mips64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_mipsx.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_ppc64x.s
diff --git a/vendor/github.com/modern-go/reflect2/relfect2_s390x.s b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/relfect2_s390x.s
diff --git a/vendor/github.com/modern-go/reflect2/safe_field.go b/vendor/github.com/modern-go/reflect2/safe_field.go
new file mode 100644
index 0000000..d4ba1f4
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_field.go
@@ -0,0 +1,58 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeField struct {
+	reflect.StructField
+}
+
+func (field *safeField) Offset() uintptr {
+	return field.StructField.Offset
+}
+
+func (field *safeField) Name() string {
+	return field.StructField.Name
+}
+
+func (field *safeField) PkgPath() string {
+	return field.StructField.PkgPath
+}
+
+func (field *safeField) Type() Type {
+	panic("not implemented")
+}
+
+func (field *safeField) Tag() reflect.StructTag {
+	return field.StructField.Tag
+}
+
+func (field *safeField) Index() []int {
+	return field.StructField.Index
+}
+
+func (field *safeField) Anonymous() bool {
+	return field.StructField.Anonymous
+}
+
+func (field *safeField) Set(obj interface{}, value interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	val.FieldByIndex(field.Index()).Set(reflect.ValueOf(value).Elem())
+}
+
+func (field *safeField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
+	panic("unsafe operation is not supported")
+}
+
+func (field *safeField) Get(obj interface{}) interface{} {
+	val := reflect.ValueOf(obj).Elem().FieldByIndex(field.Index())
+	ptr := reflect.New(val.Type())
+	ptr.Elem().Set(val)
+	return ptr.Interface()
+}
+
+func (field *safeField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_map.go b/vendor/github.com/modern-go/reflect2/safe_map.go
new file mode 100644
index 0000000..8836220
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_map.go
@@ -0,0 +1,101 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeMapType struct {
+	safeType
+}
+
+func (type2 *safeMapType) Key() Type {
+	return type2.safeType.cfg.Type2(type2.Type.Key())
+}
+
+func (type2 *safeMapType) MakeMap(cap int) interface{} {
+	ptr := reflect.New(type2.Type)
+	ptr.Elem().Set(reflect.MakeMap(type2.Type))
+	return ptr.Interface()
+}
+
+func (type2 *safeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
+	keyVal := reflect.ValueOf(key)
+	elemVal := reflect.ValueOf(elem)
+	val := reflect.ValueOf(obj)
+	val.Elem().SetMapIndex(keyVal.Elem(), elemVal.Elem())
+}
+
+func (type2 *safeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
+	keyVal := reflect.ValueOf(key)
+	if key == nil {
+		keyVal = reflect.New(type2.Type.Key()).Elem()
+	}
+	val := reflect.ValueOf(obj).MapIndex(keyVal)
+	if !val.IsValid() {
+		return nil, false
+	}
+	return val.Interface(), true
+}
+
+func (type2 *safeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
+	val := reflect.ValueOf(obj).Elem()
+	keyVal := reflect.ValueOf(key).Elem()
+	elemVal := val.MapIndex(keyVal)
+	if !elemVal.IsValid() {
+		ptr := reflect.New(reflect.PtrTo(val.Type().Elem()))
+		return ptr.Elem().Interface()
+	}
+	ptr := reflect.New(elemVal.Type())
+	ptr.Elem().Set(elemVal)
+	return ptr.Interface()
+}
+
+func (type2 *safeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeMapType) Iterate(obj interface{}) MapIterator {
+	m := reflect.ValueOf(obj).Elem()
+	return &safeMapIterator{
+		m:    m,
+		keys: m.MapKeys(),
+	}
+}
+
+func (type2 *safeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+	panic("does not support unsafe operation")
+}
+
+type safeMapIterator struct {
+	i    int
+	m    reflect.Value
+	keys []reflect.Value
+}
+
+func (iter *safeMapIterator) HasNext() bool {
+	return iter.i != len(iter.keys)
+}
+
+func (iter *safeMapIterator) Next() (interface{}, interface{}) {
+	key := iter.keys[iter.i]
+	elem := iter.m.MapIndex(key)
+	iter.i += 1
+	keyPtr := reflect.New(key.Type())
+	keyPtr.Elem().Set(key)
+	elemPtr := reflect.New(elem.Type())
+	elemPtr.Elem().Set(elem)
+	return keyPtr.Interface(), elemPtr.Interface()
+}
+
+func (iter *safeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_slice.go b/vendor/github.com/modern-go/reflect2/safe_slice.go
new file mode 100644
index 0000000..bcce6fd
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_slice.go
@@ -0,0 +1,92 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeSliceType struct {
+	safeType
+}
+
+func (type2 *safeSliceType) SetIndex(obj interface{}, index int, value interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	elem := reflect.ValueOf(value).Elem()
+	val.Index(index).Set(elem)
+}
+
+func (type2 *safeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, value unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) GetIndex(obj interface{}, index int) interface{} {
+	val := reflect.ValueOf(obj).Elem()
+	elem := val.Index(index)
+	ptr := reflect.New(elem.Type())
+	ptr.Elem().Set(elem)
+	return ptr.Interface()
+}
+
+func (type2 *safeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) MakeSlice(length int, cap int) interface{} {
+	val := reflect.MakeSlice(type2.Type, length, cap)
+	ptr := reflect.New(val.Type())
+	ptr.Elem().Set(val)
+	return ptr.Interface()
+}
+
+func (type2 *safeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Grow(obj interface{}, newLength int) {
+	oldCap := type2.Cap(obj)
+	oldSlice := reflect.ValueOf(obj).Elem()
+	delta := newLength - oldCap
+	deltaVals := make([]reflect.Value, delta)
+	newSlice := reflect.Append(oldSlice, deltaVals...)
+	oldSlice.Set(newSlice)
+}
+
+func (type2 *safeSliceType) UnsafeGrow(ptr unsafe.Pointer, newLength int) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Append(obj interface{}, elem interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	elemVal := reflect.ValueOf(elem).Elem()
+	newVal := reflect.Append(val, elemVal)
+	val.Set(newVal)
+}
+
+func (type2 *safeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) SetNil(obj interface{}) {
+	val := reflect.ValueOf(obj).Elem()
+	val.Set(reflect.Zero(val.Type()))
+}
+
+func (type2 *safeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) LengthOf(obj interface{}) int {
+	return reflect.ValueOf(obj).Elem().Len()
+}
+
+func (type2 *safeSliceType) UnsafeLengthOf(ptr unsafe.Pointer) int {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeSliceType) Cap(obj interface{}) int {
+	return reflect.ValueOf(obj).Elem().Cap()
+}
+
+func (type2 *safeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
+	panic("does not support unsafe operation")
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_struct.go b/vendor/github.com/modern-go/reflect2/safe_struct.go
new file mode 100644
index 0000000..e5fb9b3
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_struct.go
@@ -0,0 +1,29 @@
+package reflect2
+
+type safeStructType struct {
+	safeType
+}
+
+func (type2 *safeStructType) FieldByName(name string) StructField {
+	field, found := type2.Type.FieldByName(name)
+	if !found {
+		panic("field " + name + " not found")
+	}
+	return &safeField{StructField: field}
+}
+
+func (type2 *safeStructType) Field(i int) StructField {
+	return &safeField{StructField: type2.Type.Field(i)}
+}
+
+func (type2 *safeStructType) FieldByIndex(index []int) StructField {
+	return &safeField{StructField: type2.Type.FieldByIndex(index)}
+}
+
+func (type2 *safeStructType) FieldByNameFunc(match func(string) bool) StructField {
+	field, found := type2.Type.FieldByNameFunc(match)
+	if !found {
+		panic("field match condition not found in " + type2.Type.String())
+	}
+	return &safeField{StructField: field}
+}
diff --git a/vendor/github.com/modern-go/reflect2/safe_type.go b/vendor/github.com/modern-go/reflect2/safe_type.go
new file mode 100644
index 0000000..ee4e7bb
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/safe_type.go
@@ -0,0 +1,78 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type safeType struct {
+	reflect.Type
+	cfg *frozenConfig
+}
+
+func (type2 *safeType) New() interface{} {
+	return reflect.New(type2.Type).Interface()
+}
+
+func (type2 *safeType) UnsafeNew() unsafe.Pointer {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Elem() Type {
+	return type2.cfg.Type2(type2.Type.Elem())
+}
+
+func (type2 *safeType) Type1() reflect.Type {
+	return type2.Type
+}
+
+func (type2 *safeType) PackEFace(ptr unsafe.Pointer) interface{} {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Implements(thatType Type) bool {
+	return type2.Type.Implements(thatType.Type1())
+}
+
+func (type2 *safeType) RType() uintptr {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Indirect(obj interface{}) interface{} {
+	return reflect.Indirect(reflect.ValueOf(obj)).Interface()
+}
+
+func (type2 *safeType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) LikePtr() bool {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) IsNullable() bool {
+	return IsNullable(type2.Kind())
+}
+
+func (type2 *safeType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	return reflect.ValueOf(obj).Elem().IsNil()
+}
+
+func (type2 *safeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) Set(obj interface{}, val interface{}) {
+	reflect.ValueOf(obj).Elem().Set(reflect.ValueOf(val).Elem())
+}
+
+func (type2 *safeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	panic("does not support unsafe operation")
+}
+
+func (type2 *safeType) AssignableTo(anotherType Type) bool {
+	return type2.Type1().AssignableTo(anotherType.Type1())
+}
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
new file mode 100755
index 0000000..3d2b976
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
+    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
new file mode 100644
index 0000000..6d48911
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/type_map.go
@@ -0,0 +1,103 @@
+package reflect2
+
+import (
+	"reflect"
+	"runtime"
+	"strings"
+	"unsafe"
+)
+
+// typelinks1 for 1.5 ~ 1.6
+//go:linkname typelinks1 reflect.typelinks
+func typelinks1() [][]unsafe.Pointer
+
+// typelinks2 for 1.7 ~
+//go:linkname typelinks2 reflect.typelinks
+func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
+
+var types = map[string]reflect.Type{}
+var packages = map[string]map[string]reflect.Type{}
+
+func init() {
+	ver := runtime.Version()
+	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
+		loadGo15Types()
+	} else if ver == "go1.6" || strings.HasPrefix(ver, "go1.6.") {
+		loadGo15Types()
+	} else {
+		loadGo17Types()
+	}
+}
+
+func loadGo15Types() {
+	var obj interface{} = reflect.TypeOf(0)
+	typePtrss := typelinks1()
+	for _, typePtrs := range typePtrss {
+		for _, typePtr := range typePtrs {
+			(*emptyInterface)(unsafe.Pointer(&obj)).word = typePtr
+			typ := obj.(reflect.Type)
+			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+			if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Ptr &&
+				typ.Elem().Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem().Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+		}
+	}
+}
+
+func loadGo17Types() {
+	var obj interface{} = reflect.TypeOf(0)
+	sections, offset := typelinks2()
+	for i, offs := range offset {
+		rodata := sections[i]
+		for _, off := range offs {
+			(*emptyInterface)(unsafe.Pointer(&obj)).word = resolveTypeOff(unsafe.Pointer(rodata), off)
+			typ := obj.(reflect.Type)
+			if typ.Kind() == reflect.Ptr && typ.Elem().Kind() == reflect.Struct {
+				loadedType := typ.Elem()
+				pkgTypes := packages[loadedType.PkgPath()]
+				if pkgTypes == nil {
+					pkgTypes = map[string]reflect.Type{}
+					packages[loadedType.PkgPath()] = pkgTypes
+				}
+				types[loadedType.String()] = loadedType
+				pkgTypes[loadedType.Name()] = loadedType
+			}
+		}
+	}
+}
+
+type emptyInterface struct {
+	typ  unsafe.Pointer
+	word unsafe.Pointer
+}
+
+// TypeByName returns the type by its name, just like Class.forName in Java
+func TypeByName(typeName string) Type {
+	return Type2(types[typeName])
+}
+
+// TypeByPackageName returns the type by its package path and name
+func TypeByPackageName(pkgPath string, name string) Type {
+	pkgTypes := packages[pkgPath]
+	if pkgTypes == nil {
+		return nil
+	}
+	return Type2(pkgTypes[name])
+}
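+
+// Usage sketch (editorial illustration, not part of the upstream source; the
+// package path and type name are hypothetical):
+//
+//	typ := reflect2.TypeByName("mypkg.MyStruct")
+//	byPkg := reflect2.TypeByPackageName("github.com/you/mypkg", "MyStruct")
+//	if typ == nil || byPkg == nil {
+//		// the type was never referenced, so it was not linked into the binary
+//	}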
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_array.go b/vendor/github.com/modern-go/reflect2/unsafe_array.go
new file mode 100644
index 0000000..76cbdba
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_array.go
@@ -0,0 +1,65 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeArrayType struct {
+	unsafeType
+	elemRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+	elemSize   uintptr
+	likePtr    bool
+}
+
+func newUnsafeArrayType(cfg *frozenConfig, type1 reflect.Type) *UnsafeArrayType {
+	return &UnsafeArrayType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		elemRType:  unpackEFace(type1.Elem()).data,
+		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
+		elemSize:   type1.Elem().Size(),
+		likePtr:    likePtrType(type1),
+	}
+}
+
+func (type2 *UnsafeArrayType) LikePtr() bool {
+	return type2.likePtr
+}
+
+func (type2 *UnsafeArrayType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeArrayType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	if type2.likePtr {
+		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+	}
+	return packEFace(type2.rtype, ptr)
+}
+
+func (type2 *UnsafeArrayType) SetIndex(obj interface{}, index int, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("ArrayType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("ArrayType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
+}
+
+func (type2 *UnsafeArrayType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
+	elemPtr := arrayAt(obj, index, type2.elemSize, "i < s.Len")
+	typedmemmove(type2.elemRType, elemPtr, elem)
+}
+
+func (type2 *UnsafeArrayType) GetIndex(obj interface{}, index int) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("ArrayType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeArrayType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	return arrayAt(obj, index, type2.elemSize, "i < s.Len")
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_eface.go b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
new file mode 100644
index 0000000..805010f
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_eface.go
@@ -0,0 +1,59 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type eface struct {
+	rtype unsafe.Pointer
+	data  unsafe.Pointer
+}
+
+func unpackEFace(obj interface{}) *eface {
+	return (*eface)(unsafe.Pointer(&obj))
+}
+
+func packEFace(rtype unsafe.Pointer, data unsafe.Pointer) interface{} {
+	var i interface{}
+	e := (*eface)(unsafe.Pointer(&i))
+	e.rtype = rtype
+	e.data = data
+	return i
+}
+
+type UnsafeEFaceType struct {
+	unsafeType
+}
+
+func newUnsafeEFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeEFaceType {
+	return &UnsafeEFaceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafeEFaceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeEFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return unpackEFace(*(*interface{})(ptr)).data == nil
+}
+
+func (type2 *UnsafeEFaceType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeEFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return *(*interface{})(ptr)
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_field.go b/vendor/github.com/modern-go/reflect2/unsafe_field.go
new file mode 100644
index 0000000..5eb5313
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_field.go
@@ -0,0 +1,74 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeStructField struct {
+	reflect.StructField
+	structType *UnsafeStructType
+	rtype      unsafe.Pointer
+	ptrRType   unsafe.Pointer
+}
+
+func newUnsafeStructField(structType *UnsafeStructType, structField reflect.StructField) *UnsafeStructField {
+	return &UnsafeStructField{
+		StructField: structField,
+		rtype:       unpackEFace(structField.Type).data,
+		ptrRType:    unpackEFace(reflect.PtrTo(structField.Type)).data,
+		structType:  structType,
+	}
+}
+
+func (field *UnsafeStructField) Offset() uintptr {
+	return field.StructField.Offset
+}
+
+func (field *UnsafeStructField) Name() string {
+	return field.StructField.Name
+}
+
+func (field *UnsafeStructField) PkgPath() string {
+	return field.StructField.PkgPath
+}
+
+func (field *UnsafeStructField) Type() Type {
+	return field.structType.cfg.Type2(field.StructField.Type)
+}
+
+func (field *UnsafeStructField) Tag() reflect.StructTag {
+	return field.StructField.Tag
+}
+
+func (field *UnsafeStructField) Index() []int {
+	return field.StructField.Index
+}
+
+func (field *UnsafeStructField) Anonymous() bool {
+	return field.StructField.Anonymous
+}
+
+func (field *UnsafeStructField) Set(obj interface{}, value interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("StructField.SetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
+	valueEFace := unpackEFace(value)
+	assertType("StructField.SetIndex argument 2", field.ptrRType, valueEFace.rtype)
+	field.UnsafeSet(objEFace.data, valueEFace.data)
+}
+
+func (field *UnsafeStructField) UnsafeSet(obj unsafe.Pointer, value unsafe.Pointer) {
+	fieldPtr := add(obj, field.StructField.Offset, "same as non-reflect &v.field")
+	typedmemmove(field.rtype, fieldPtr, value)
+}
+
+func (field *UnsafeStructField) Get(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("StructField.GetIndex argument 1", field.structType.ptrRType, objEFace.rtype)
+	value := field.UnsafeGet(objEFace.data)
+	return packEFace(field.ptrRType, value)
+}
+
+func (field *UnsafeStructField) UnsafeGet(obj unsafe.Pointer) unsafe.Pointer {
+	return add(obj, field.StructField.Offset, "same as non-reflect &v.field")
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_iface.go b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
new file mode 100644
index 0000000..b601955
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_iface.go
@@ -0,0 +1,64 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type iface struct {
+	itab *itab
+	data unsafe.Pointer
+}
+
+type itab struct {
+	ignore unsafe.Pointer
+	rtype  unsafe.Pointer
+}
+
+func IFaceToEFace(ptr unsafe.Pointer) interface{} {
+	iface := (*iface)(ptr)
+	if iface.itab == nil {
+		return nil
+	}
+	return packEFace(iface.itab.rtype, iface.data)
+}
+
+type UnsafeIFaceType struct {
+	unsafeType
+}
+
+func newUnsafeIFaceType(cfg *frozenConfig, type1 reflect.Type) *UnsafeIFaceType {
+	return &UnsafeIFaceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafeIFaceType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeIFaceType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return IFaceToEFace(ptr)
+}
+
+func (type2 *UnsafeIFaceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeIFaceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	iface := (*iface)(ptr)
+	if iface.itab == nil {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_link.go b/vendor/github.com/modern-go/reflect2/unsafe_link.go
new file mode 100644
index 0000000..57229c8
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_link.go
@@ -0,0 +1,70 @@
+package reflect2
+
+import "unsafe"
+
+//go:linkname unsafe_New reflect.unsafe_New
+func unsafe_New(rtype unsafe.Pointer) unsafe.Pointer
+
+//go:linkname typedmemmove reflect.typedmemmove
+func typedmemmove(rtype unsafe.Pointer, dst, src unsafe.Pointer)
+
+//go:linkname unsafe_NewArray reflect.unsafe_NewArray
+func unsafe_NewArray(rtype unsafe.Pointer, length int) unsafe.Pointer
+
+// typedslicecopy copies a slice of elemType values from src to dst,
+// returning the number of elements copied.
+//go:linkname typedslicecopy reflect.typedslicecopy
+//go:noescape
+func typedslicecopy(elemType unsafe.Pointer, dst, src sliceHeader) int
+
+//go:linkname mapassign reflect.mapassign
+//go:noescape
+func mapassign(rtype unsafe.Pointer, m unsafe.Pointer, key, val unsafe.Pointer)
+
+//go:linkname mapaccess reflect.mapaccess
+//go:noescape
+func mapaccess(rtype unsafe.Pointer, m unsafe.Pointer, key unsafe.Pointer) (val unsafe.Pointer)
+
+// m escapes into the return value, but the caller of mapiterinit
+// doesn't let the return value escape.
+//go:noescape
+//go:linkname mapiterinit reflect.mapiterinit
+func mapiterinit(rtype unsafe.Pointer, m unsafe.Pointer) *hiter
+
+//go:noescape
+//go:linkname mapiternext reflect.mapiternext
+func mapiternext(it *hiter)
+
+//go:linkname ifaceE2I reflect.ifaceE2I
+func ifaceE2I(rtype unsafe.Pointer, src interface{}, dst unsafe.Pointer)
+
+// A hash iteration structure.
+// If you modify hiter, also change cmd/internal/gc/reflect.go to indicate
+// the layout of this structure.
+type hiter struct {
+	key   unsafe.Pointer // Must be in first position.  Write nil to indicate iteration end (see cmd/internal/gc/range.go).
+	value unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
+	// rest fields are ignored
+}
+
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
+	return unsafe.Pointer(uintptr(p) + x)
+}
+
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+	return add(p, uintptr(i)*eltSize, "i < len")
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_map.go b/vendor/github.com/modern-go/reflect2/unsafe_map.go
new file mode 100644
index 0000000..f2e76e6
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_map.go
@@ -0,0 +1,138 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeMapType struct {
+	unsafeType
+	pKeyRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+}
+
+func newUnsafeMapType(cfg *frozenConfig, type1 reflect.Type) MapType {
+	return &UnsafeMapType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		pKeyRType:  unpackEFace(reflect.PtrTo(type1.Key())).data,
+		pElemRType: unpackEFace(reflect.PtrTo(type1.Elem())).data,
+	}
+}
+
+func (type2 *UnsafeMapType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return *(*unsafe.Pointer)(ptr) == nil
+}
+
+func (type2 *UnsafeMapType) LikePtr() bool {
+	return true
+}
+
+func (type2 *UnsafeMapType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+}
+
+func (type2 *UnsafeMapType) Key() Type {
+	return type2.cfg.Type2(type2.Type.Key())
+}
+
+func (type2 *UnsafeMapType) MakeMap(cap int) interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeMakeMap(cap))
+}
+
+func (type2 *UnsafeMapType) UnsafeMakeMap(cap int) unsafe.Pointer {
+	m := makeMapWithSize(type2.rtype, cap)
+	return unsafe.Pointer(&m)
+}
+
+func (type2 *UnsafeMapType) SetIndex(obj interface{}, key interface{}, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.SetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("MapType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, keyEFace.data, elemEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeSetIndex(obj unsafe.Pointer, key unsafe.Pointer, elem unsafe.Pointer) {
+	mapassign(type2.rtype, *(*unsafe.Pointer)(obj), key, elem)
+}
+
+func (type2 *UnsafeMapType) TryGetIndex(obj interface{}, key interface{}) (interface{}, bool) {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.TryGetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.TryGetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
+	if elemPtr == nil {
+		return nil, false
+	}
+	return packEFace(type2.pElemRType, elemPtr), true
+}
+
+func (type2 *UnsafeMapType) GetIndex(obj interface{}, key interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	keyEFace := unpackEFace(key)
+	assertType("MapType.GetIndex argument 2", type2.pKeyRType, keyEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, keyEFace.data)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeMapType) UnsafeGetIndex(obj unsafe.Pointer, key unsafe.Pointer) unsafe.Pointer {
+	return mapaccess(type2.rtype, *(*unsafe.Pointer)(obj), key)
+}
+
+func (type2 *UnsafeMapType) Iterate(obj interface{}) MapIterator {
+	objEFace := unpackEFace(obj)
+	assertType("MapType.Iterate argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIterate(objEFace.data)
+}
+
+func (type2 *UnsafeMapType) UnsafeIterate(obj unsafe.Pointer) MapIterator {
+	return &UnsafeMapIterator{
+		hiter:      mapiterinit(type2.rtype, *(*unsafe.Pointer)(obj)),
+		pKeyRType:  type2.pKeyRType,
+		pElemRType: type2.pElemRType,
+	}
+}
+
+type UnsafeMapIterator struct {
+	*hiter
+	pKeyRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+}
+
+func (iter *UnsafeMapIterator) HasNext() bool {
+	return iter.key != nil
+}
+
+func (iter *UnsafeMapIterator) Next() (interface{}, interface{}) {
+	key, elem := iter.UnsafeNext()
+	return packEFace(iter.pKeyRType, key), packEFace(iter.pElemRType, elem)
+}
+
+func (iter *UnsafeMapIterator) UnsafeNext() (unsafe.Pointer, unsafe.Pointer) {
+	key := iter.key
+	elem := iter.value
+	mapiternext(iter.hiter)
+	return key, elem
+}
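+
+// Illustrative usage sketch, assuming a hypothetical map value m: callers
+// typically obtain a MapType via TypeOf and then drive the HasNext/Next
+// pattern implemented above.
+//
+//	mapType := TypeOf(m).(MapType)
+//	iter := mapType.Iterate(&m)
+//	for iter.HasNext() {
+//		keyPtr, elemPtr := iter.Next() // pointers to the key and element
+//		_, _ = keyPtr, elemPtr
+//	}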
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_ptr.go b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
new file mode 100644
index 0000000..8e5ec9c
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_ptr.go
@@ -0,0 +1,46 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafePtrType struct {
+	unsafeType
+}
+
+func newUnsafePtrType(cfg *frozenConfig, type1 reflect.Type) *UnsafePtrType {
+	return &UnsafePtrType{
+		unsafeType: *newUnsafeType(cfg, type1),
+	}
+}
+
+func (type2 *UnsafePtrType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafePtrType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return *(*unsafe.Pointer)(ptr) == nil
+}
+
+func (type2 *UnsafePtrType) LikePtr() bool {
+	return true
+}
+
+func (type2 *UnsafePtrType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafePtrType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_slice.go b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
new file mode 100644
index 0000000..1c6d876
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_slice.go
@@ -0,0 +1,177 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// sliceHeader is a safe version of SliceHeader used within this package.
+type sliceHeader struct {
+	Data unsafe.Pointer
+	Len  int
+	Cap  int
+}
+
+type UnsafeSliceType struct {
+	unsafeType
+	elemRType  unsafe.Pointer
+	pElemRType unsafe.Pointer
+	elemSize   uintptr
+}
+
+func newUnsafeSliceType(cfg *frozenConfig, type1 reflect.Type) SliceType {
+	elemType := type1.Elem()
+	return &UnsafeSliceType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		pElemRType: unpackEFace(reflect.PtrTo(elemType)).data,
+		elemRType:  unpackEFace(elemType).data,
+		elemSize:   elemType.Size(),
+	}
+}
+
+func (type2 *UnsafeSliceType) Set(obj interface{}, val interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
+	valEFace := unpackEFace(val)
+	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
+	type2.UnsafeSet(objEFace.data, valEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	*(*sliceHeader)(ptr) = *(*sliceHeader)(val)
+}
+
+func (type2 *UnsafeSliceType) IsNil(obj interface{}) bool {
+	if obj == nil {
+		return true
+	}
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	if ptr == nil {
+		return true
+	}
+	return (*sliceHeader)(ptr).Data == nil
+}
+
+func (type2 *UnsafeSliceType) SetNil(obj interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.SetNil argument 1", type2.ptrRType, objEFace.rtype)
+	type2.UnsafeSetNil(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSetNil(ptr unsafe.Pointer) {
+	header := (*sliceHeader)(ptr)
+	header.Len = 0
+	header.Cap = 0
+	header.Data = nil
+}
+
+func (type2 *UnsafeSliceType) MakeSlice(length int, cap int) interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeMakeSlice(length, cap))
+}
+
+func (type2 *UnsafeSliceType) UnsafeMakeSlice(length int, cap int) unsafe.Pointer {
+	header := &sliceHeader{unsafe_NewArray(type2.elemRType, cap), length, cap}
+	return unsafe.Pointer(header)
+}
+
+func (type2 *UnsafeSliceType) LengthOf(obj interface{}) int {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Len argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeLengthOf(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeLengthOf(obj unsafe.Pointer) int {
+	header := (*sliceHeader)(obj)
+	return header.Len
+}
+
+func (type2 *UnsafeSliceType) SetIndex(obj interface{}, index int, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.SetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("SliceType.SetIndex argument 3", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeSetIndex(objEFace.data, index, elemEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeSetIndex(obj unsafe.Pointer, index int, elem unsafe.Pointer) {
+	header := (*sliceHeader)(obj)
+	elemPtr := arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
+	typedmemmove(type2.elemRType, elemPtr, elem)
+}
+
+func (type2 *UnsafeSliceType) GetIndex(obj interface{}, index int) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.GetIndex argument 1", type2.ptrRType, objEFace.rtype)
+	elemPtr := type2.UnsafeGetIndex(objEFace.data, index)
+	return packEFace(type2.pElemRType, elemPtr)
+}
+
+func (type2 *UnsafeSliceType) UnsafeGetIndex(obj unsafe.Pointer, index int) unsafe.Pointer {
+	header := (*sliceHeader)(obj)
+	return arrayAt(header.Data, index, type2.elemSize, "i < s.Len")
+}
+
+func (type2 *UnsafeSliceType) Append(obj interface{}, elem interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Append argument 1", type2.ptrRType, objEFace.rtype)
+	elemEFace := unpackEFace(elem)
+	assertType("SliceType.Append argument 2", type2.pElemRType, elemEFace.rtype)
+	type2.UnsafeAppend(objEFace.data, elemEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeAppend(obj unsafe.Pointer, elem unsafe.Pointer) {
+	header := (*sliceHeader)(obj)
+	oldLen := header.Len
+	type2.UnsafeGrow(obj, oldLen+1)
+	type2.UnsafeSetIndex(obj, oldLen, elem)
+}
+
+func (type2 *UnsafeSliceType) Cap(obj interface{}) int {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Cap argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeCap(objEFace.data)
+}
+
+func (type2 *UnsafeSliceType) UnsafeCap(ptr unsafe.Pointer) int {
+	return (*sliceHeader)(ptr).Cap
+}
+
+func (type2 *UnsafeSliceType) Grow(obj interface{}, newLength int) {
+	objEFace := unpackEFace(obj)
+	assertType("SliceType.Grow argument 1", type2.ptrRType, objEFace.rtype)
+	type2.UnsafeGrow(objEFace.data, newLength)
+}
+
+func (type2 *UnsafeSliceType) UnsafeGrow(obj unsafe.Pointer, newLength int) {
+	header := (*sliceHeader)(obj)
+	if newLength <= header.Cap {
+		header.Len = newLength
+		return
+	}
+	newCap := calcNewCap(header.Cap, newLength)
+	newHeader := (*sliceHeader)(type2.UnsafeMakeSlice(header.Len, newCap))
+	typedslicecopy(type2.elemRType, *newHeader, *header)
+	header.Data = newHeader.Data
+	header.Cap = newHeader.Cap
+	header.Len = newLength
+}
+
+func calcNewCap(cap int, expectedCap int) int {
+	if cap == 0 {
+		cap = expectedCap
+	} else {
+		for cap < expectedCap {
+			if cap < 1024 {
+				cap += cap
+			} else {
+				cap += cap / 4
+			}
+		}
+	}
+	return cap
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_struct.go b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
new file mode 100644
index 0000000..804d916
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_struct.go
@@ -0,0 +1,59 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type UnsafeStructType struct {
+	unsafeType
+	likePtr bool
+}
+
+func newUnsafeStructType(cfg *frozenConfig, type1 reflect.Type) *UnsafeStructType {
+	return &UnsafeStructType{
+		unsafeType: *newUnsafeType(cfg, type1),
+		likePtr:    likePtrType(type1),
+	}
+}
+
+func (type2 *UnsafeStructType) LikePtr() bool {
+	return type2.likePtr
+}
+
+func (type2 *UnsafeStructType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *UnsafeStructType) UnsafeIndirect(ptr unsafe.Pointer) interface{} {
+	if type2.likePtr {
+		return packEFace(type2.rtype, *(*unsafe.Pointer)(ptr))
+	}
+	return packEFace(type2.rtype, ptr)
+}
+
+func (type2 *UnsafeStructType) FieldByName(name string) StructField {
+	structField, found := type2.Type.FieldByName(name)
+	if !found {
+		return nil
+	}
+	return newUnsafeStructField(type2, structField)
+}
+
+func (type2 *UnsafeStructType) Field(i int) StructField {
+	return newUnsafeStructField(type2, type2.Type.Field(i))
+}
+
+func (type2 *UnsafeStructType) FieldByIndex(index []int) StructField {
+	return newUnsafeStructField(type2, type2.Type.FieldByIndex(index))
+}
+
+func (type2 *UnsafeStructType) FieldByNameFunc(match func(string) bool) StructField {
+	structField, found := type2.Type.FieldByNameFunc(match)
+	if !found {
+		panic("field match condition not found in " + type2.Type.String())
+	}
+	return newUnsafeStructField(type2, structField)
+}
diff --git a/vendor/github.com/modern-go/reflect2/unsafe_type.go b/vendor/github.com/modern-go/reflect2/unsafe_type.go
new file mode 100644
index 0000000..1394171
--- /dev/null
+++ b/vendor/github.com/modern-go/reflect2/unsafe_type.go
@@ -0,0 +1,85 @@
+package reflect2
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+type unsafeType struct {
+	safeType
+	rtype    unsafe.Pointer
+	ptrRType unsafe.Pointer
+}
+
+func newUnsafeType(cfg *frozenConfig, type1 reflect.Type) *unsafeType {
+	return &unsafeType{
+		safeType: safeType{
+			Type: type1,
+			cfg:  cfg,
+		},
+		rtype:    unpackEFace(type1).data,
+		ptrRType: unpackEFace(reflect.PtrTo(type1)).data,
+	}
+}
+
+func (type2 *unsafeType) Set(obj interface{}, val interface{}) {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Set argument 1", type2.ptrRType, objEFace.rtype)
+	valEFace := unpackEFace(val)
+	assertType("Type.Set argument 2", type2.ptrRType, valEFace.rtype)
+	type2.UnsafeSet(objEFace.data, valEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeSet(ptr unsafe.Pointer, val unsafe.Pointer) {
+	typedmemmove(type2.rtype, ptr, val)
+}
+
+func (type2 *unsafeType) IsNil(obj interface{}) bool {
+	objEFace := unpackEFace(obj)
+	assertType("Type.IsNil argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIsNil(objEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeIsNil(ptr unsafe.Pointer) bool {
+	return ptr == nil
+}
+
+func (type2 *unsafeType) UnsafeNew() unsafe.Pointer {
+	return unsafe_New(type2.rtype)
+}
+
+func (type2 *unsafeType) New() interface{} {
+	return packEFace(type2.ptrRType, type2.UnsafeNew())
+}
+
+func (type2 *unsafeType) PackEFace(ptr unsafe.Pointer) interface{} {
+	return packEFace(type2.ptrRType, ptr)
+}
+
+func (type2 *unsafeType) RType() uintptr {
+	return uintptr(type2.rtype)
+}
+
+func (type2 *unsafeType) Indirect(obj interface{}) interface{} {
+	objEFace := unpackEFace(obj)
+	assertType("Type.Indirect argument 1", type2.ptrRType, objEFace.rtype)
+	return type2.UnsafeIndirect(objEFace.data)
+}
+
+func (type2 *unsafeType) UnsafeIndirect(obj unsafe.Pointer) interface{} {
+	return packEFace(type2.rtype, obj)
+}
+
+func (type2 *unsafeType) LikePtr() bool {
+	return false
+}
+
+func assertType(where string, expectRType unsafe.Pointer, actualRType unsafe.Pointer) {
+	if expectRType != actualRType {
+		expectType := reflect.TypeOf(0)
+		(*iface)(unsafe.Pointer(&expectType)).data = expectRType
+		actualType := reflect.TypeOf(0)
+		(*iface)(unsafe.Pointer(&actualType)).data = actualRType
+		panic(where + ": expect " + expectType.String() + ", actual " + actualType.String())
+	}
+}
diff --git a/vendor/github.com/spf13/pflag/.gitignore b/vendor/github.com/spf13/pflag/.gitignore
new file mode 100644
index 0000000..c3da290
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.gitignore
@@ -0,0 +1,2 @@
+.idea/*
+
diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml
new file mode 100644
index 0000000..f8a63b3
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/.travis.yml
@@ -0,0 +1,21 @@
+sudo: false
+
+language: go
+
+go:
+  - 1.7.3
+  - 1.8.1
+  - tip
+
+matrix:
+  allow_failures:
+    - go: tip
+
+install:
+  - go get github.com/golang/lint/golint
+  - export PATH=$GOPATH/bin:$PATH
+  - go install ./...
+
+script:
+  - verify/all.sh -v
+  - go test ./...
diff --git a/vendor/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE
new file mode 100644
index 0000000..63ed1cf
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Alex Ogier. All rights reserved.
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
new file mode 100644
index 0000000..b052414
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -0,0 +1,296 @@
+[![Build Status](https://travis-ci.org/spf13/pflag.svg?branch=master)](https://travis-ci.org/spf13/pflag)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/pflag)](https://goreportcard.com/report/github.com/spf13/pflag)
+[![GoDoc](https://godoc.org/github.com/spf13/pflag?status.svg)](https://godoc.org/github.com/spf13/pflag)
+
+## Description
+
+pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the [GNU extensions to the POSIX recommendations
+for command-line options][1]. For a more precise description, see the
+"Command-line flag syntax" section below.
+
+[1]: http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+pflag is available under the same style of BSD license as the Go language,
+which can be found in the LICENSE file.
+
+## Installation
+
+pflag is available using the standard `go get` command.
+
+Install by running:
+
+    go get github.com/spf13/pflag
+
+Run tests by running:
+
+    go test github.com/spf13/pflag
+
+## Usage
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+``` go
+import flag "github.com/spf13/pflag"
+```
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+
+``` go
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+```
+
+If you like, you can bind the flag to a variable using the Var() functions.
+
+``` go
+var flagvar int
+func init() {
+    flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+}
+```
+
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+
+``` go
+flag.Var(&flagVal, "name", "help message for flagname")
+```
+
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+
+``` go
+flag.Parse()
+```
+
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+
+``` go
+fmt.Println("ip has value ", *ip)
+fmt.Println("flagvar has value ", flagvar)
+```
+
+There are helper functions available to get the value stored in a Flag if you
+have a FlagSet but find it difficult to keep up with all of the flag pointers
+in your code. If you have a pflag.FlagSet with a flag called 'flagname' of
+type int, you can use GetInt() to get the int value. Note that 'flagname' must
+exist and must be of type int; GetString("flagname") will fail.
+
+``` go
+i, err := flagset.GetInt("flagname")
+```
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
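+
+As an illustrative sketch (the printed values are hypothetical), the remaining
+arguments can be walked like this:
+
+``` go
+for i := 0; i < flag.NArg(); i++ {
+    fmt.Println("arg", i, "=", flag.Arg(i))
+}
+```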
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+var flagvar bool
+func init() {
+	flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+}
+flag.VarP(&flagVal, "varname", "v", "help message")
+```
+
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
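+
+As a rough sketch (the subcommand name and flag below are made up for
+illustration), an independent FlagSet for a subcommand could look like:
+
+``` go
+getCmd := flag.NewFlagSet("get", flag.ExitOnError)
+verbose := getCmd.BoolP("verbose", "v", false, "verbose output")
+
+// parse only the arguments that follow the subcommand name
+getCmd.Parse(os.Args[2:])
+if *verbose {
+    fmt.Println("verbose output enabled for the get subcommand")
+}
+```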
+
+## Setting no option default values for flags
+
+After you create a flag it is possible to set the pflag.NoOptDefVal for
+the given flag. Doing this changes the meaning of the flag slightly. If
+a flag has a NoOptDefVal and the flag is set on the command line without
+an option, the flag will be set to the NoOptDefVal. For example, given:
+
+``` go
+var ip = flag.IntP("flagname", "f", 1234, "help message")
+flag.Lookup("flagname").NoOptDefVal = "4321"
+```
+
+Would result in something like
+
+| Parsed Arguments | Resulting Value |
+| -------------    | -------------   |
+| --flagname=1357  | ip=1357         |
+| --flagname       | ip=4321         |
+| [nothing]        | ip=1234         |
+
+## Command line flag syntax
+
+```
+--flag    // boolean flags, or flags with no option default values
+--flag x  // only on flags without a default value
+--flag=x
+```
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags
+or flags with a 'no option default value'.
+
+```
+// boolean or flags where the 'no option default value' is set
+-f
+-f=true
+-abc
+but
+-b true is INVALID
+
+// non-boolean and flags without a 'no option default value'
+-n 1234
+-n=1234
+-n1234
+
+// mixed
+-abcs "hello"
+-absd="hello"
+-abcs1234
+```
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
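+
+For example, with a hypothetical duration flag defined as below, both
+`--timeout=90s` and `--timeout=1m30s` parse to the same value:
+
+``` go
+var timeout = flag.Duration("timeout", 30*time.Second, "request timeout")
+```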
+
+## Mutating or "Normalizing" Flag names
+
+It is possible to set a custom flag name normalization function. It allows flag names to be mutated, both when created in the code and when used on the command line, into some 'normalized' form. The 'normalized' form is used for comparison. Two examples of using a custom normalization function follow.
+
+**Example #1**: You want -, _, and . in flags to compare the same. aka --my-flag == --my_flag == --my.flag
+
+``` go
+func wordSepNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	from := []string{"-", "_"}
+	to := "."
+	for _, sep := range from {
+		name = strings.Replace(name, sep, to, -1)
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(wordSepNormalizeFunc)
+```
+
+**Example #2**: You want to alias two flags. aka --old-flag-name == --new-flag-name
+
+``` go
+func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName {
+	switch name {
+	case "old-flag-name":
+		name = "new-flag-name"
+		break
+	}
+	return pflag.NormalizedName(name)
+}
+
+myFlagSet.SetNormalizeFunc(aliasNormalizeFunc)
+```
+
+## Deprecating a flag or its shorthand
+It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used.
+
+**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead.
+```go
+// deprecate a flag by specifying its name and a usage message
+flags.MarkDeprecated("badflag", "please use --good-flag instead")
+```
+This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used.
+
+**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n".
+```go
+// deprecate a flag shorthand by specifying its flag name and a usage message
+flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only")
+```
+This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used.
+
+Note that the usage message is essential here; it should not be empty.
+
+## Hidden flags
+It is possible to mark a flag as hidden, meaning it will still function as normal but will not show up in usage/help text.
+
+**Example**: You have a flag named "secretFlag" that you need for internal use only, and you don't want it showing up in help text or its usage text to be available.
+```go
+// hide a flag by specifying its name
+flags.MarkHidden("secretFlag")
+```
+
+## Disable sorting of flags
+`pflag` allows you to disable the sorting of flags for the help and usage messages.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+  -v, --verbose           verbose output
+      --coolflag string   it's really cool flag (default "yeaah")
+      --usefulflag int    sometimes it's very useful (default 777)
+```
+
+
+## Supporting Go flags when using pflag
+In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
+to support flags defined by third-party dependencies (e.g. `golang/glog`).
+
+**Example**: You want to add the Go flags to the `CommandLine` flagset
+```go
+import (
+	goflag "flag"
+	flag "github.com/spf13/pflag"
+)
+
+var ip *int = flag.Int("flagname", 1234, "help message for flagname")
+
+func main() {
+	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
+	flag.Parse()
+}
+```
+
+## More info
+
+You can see the full reference documentation of the pflag package
+[at godoc.org][3], or through Go's standard documentation system by
+running `godoc -http=:6060` and browsing to
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
+installation.
+
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go
new file mode 100644
index 0000000..c4c5c0b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool.go
@@ -0,0 +1,94 @@
+package pflag
+
+import "strconv"
+
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+	Value
+	IsBoolFlag() bool
+}
+
+// -- bool Value
+type boolValue bool
+
+func newBoolValue(val bool, p *bool) *boolValue {
+	*p = val
+	return (*boolValue)(p)
+}
+
+func (b *boolValue) Set(s string) error {
+	v, err := strconv.ParseBool(s)
+	*b = boolValue(v)
+	return err
+}
+
+func (b *boolValue) Type() string {
+	return "bool"
+}
+
+func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) }
+
+func (b *boolValue) IsBoolFlag() bool { return true }
+
+func boolConv(sval string) (interface{}, error) {
+	return strconv.ParseBool(sval)
+}
+
+// GetBool returns the bool value of a flag with the given name
+func (f *FlagSet) GetBool(name string) (bool, error) {
+	val, err := f.getFlagType(name, "bool", boolConv)
+	if err != nil {
+		return false, err
+	}
+	return val.(bool), nil
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
+	f.BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// BoolVar defines a bool flag with specified name, default value, and usage string.
+// The argument p points to a bool variable in which to store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+	BoolVarP(p, name, "", value, usage)
+}
+
+// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
+	flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage)
+	flag.NoOptDefVal = "true"
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
+	return f.BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
+	p := new(bool)
+	f.BoolVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Bool defines a bool flag with specified name, default value, and usage string.
+// The return value is the address of a bool variable that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+	return BoolP(name, "", value, usage)
+}
+
+// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
+func BoolP(name, shorthand string, value bool, usage string) *bool {
+	b := CommandLine.BoolP(name, shorthand, value, usage)
+	return b
+}
diff --git a/vendor/github.com/spf13/pflag/bool_slice.go b/vendor/github.com/spf13/pflag/bool_slice.go
new file mode 100644
index 0000000..5af02f1
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bool_slice.go
@@ -0,0 +1,147 @@
+package pflag
+
+import (
+	"io"
+	"strconv"
+	"strings"
+)
+
+// -- boolSlice Value
+type boolSliceValue struct {
+	value   *[]bool
+	changed bool
+}
+
+func newBoolSliceValue(val []bool, p *[]bool) *boolSliceValue {
+	bsv := new(boolSliceValue)
+	bsv.value = p
+	*bsv.value = val
+	return bsv
+}
+
+// Set converts the comma-separated boolean argument string and assigns it as the []bool value of this flag.
+// If Set is called on a flag that already has a []bool assigned, the newly converted values will be appended.
+func (s *boolSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	boolStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse boolean values into slice
+	out := make([]bool, 0, len(boolStrSlice))
+	for _, boolStr := range boolStrSlice {
+		b, err := strconv.ParseBool(strings.TrimSpace(boolStr))
+		if err != nil {
+			return err
+		}
+		out = append(out, b)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *boolSliceValue) Type() string {
+	return "boolSlice"
+}
+
+// String defines a "native" format for this boolean slice flag value.
+func (s *boolSliceValue) String() string {
+
+	boolStrSlice := make([]string, len(*s.value))
+	for i, b := range *s.value {
+		boolStrSlice[i] = strconv.FormatBool(b)
+	}
+
+	out, _ := writeAsCSV(boolStrSlice)
+
+	return "[" + out + "]"
+}
+
+func boolSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []bool{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]bool, len(ss))
+	for i, t := range ss {
+		var err error
+		out[i], err = strconv.ParseBool(t)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetBoolSlice returns the []bool value of a flag with the given name.
+func (f *FlagSet) GetBoolSlice(name string) ([]bool, error) {
+	val, err := f.getFlagType(name, "boolSlice", boolSliceConv)
+	if err != nil {
+		return []bool{}, err
+	}
+	return val.([]bool), nil
+}
+
+// BoolSliceVar defines a boolSlice flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func (f *FlagSet) BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	f.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSliceVar defines a []bool flag with specified name, default value, and usage string.
+// The argument p points to a []bool variable in which to store the value of the flag.
+func BoolSliceVar(p *[]bool, name string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, "", usage)
+}
+
+// BoolSliceVarP is like BoolSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceVarP(p *[]bool, name, shorthand string, value []bool, usage string) {
+	CommandLine.VarP(newBoolSliceValue(value, p), name, shorthand, usage)
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func (f *FlagSet) BoolSlice(name string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	p := []bool{}
+	f.BoolSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// BoolSlice defines a []bool flag with specified name, default value, and usage string.
+// The return value is the address of a []bool variable that stores the value of the flag.
+func BoolSlice(name string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, "", value, usage)
+}
+
+// BoolSliceP is like BoolSlice, but accepts a shorthand letter that can be used after a single dash.
+func BoolSliceP(name, shorthand string, value []bool, usage string) *[]bool {
+	return CommandLine.BoolSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go
new file mode 100644
index 0000000..67d5304
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/bytes.go
@@ -0,0 +1,209 @@
+package pflag
+
+import (
+	"encoding/base64"
+	"encoding/hex"
+	"fmt"
+	"strings"
+)
+
+// BytesHex adapts []byte for use as a flag. Value of flag is HEX encoded
+type bytesHexValue []byte
+
+// String implements pflag.Value.String.
+func (bytesHex bytesHexValue) String() string {
+	return fmt.Sprintf("%X", []byte(bytesHex))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesHex *bytesHexValue) Set(value string) error {
+	bin, err := hex.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesHex = bin
+
+	return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesHexValue) Type() string {
+	return "bytesHex"
+}
+
+func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue {
+	*p = val
+	return (*bytesHexValue)(p)
+}
+
+func bytesHexConv(sval string) (interface{}, error) {
+
+	bin, err := hex.DecodeString(sval)
+
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesHex returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesHex(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesHex", bytesHexConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHexVar defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesHexVar(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, "", usage)
+}
+
+// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage)
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesHexVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesHex defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesHex(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, "", value, usage)
+}
+
+// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash.
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesHexP(name, shorthand, value, usage)
+}
+
+// BytesBase64 adapts []byte for use as a flag. Value of flag is Base64 encoded
+type bytesBase64Value []byte
+
+// String implements pflag.Value.String.
+func (bytesBase64 bytesBase64Value) String() string {
+	return base64.StdEncoding.EncodeToString([]byte(bytesBase64))
+}
+
+// Set implements pflag.Value.Set.
+func (bytesBase64 *bytesBase64Value) Set(value string) error {
+	bin, err := base64.StdEncoding.DecodeString(strings.TrimSpace(value))
+
+	if err != nil {
+		return err
+	}
+
+	*bytesBase64 = bin
+
+	return nil
+}
+
+// Type implements pflag.Value.Type.
+func (*bytesBase64Value) Type() string {
+	return "bytesBase64"
+}
+
+func newBytesBase64Value(val []byte, p *[]byte) *bytesBase64Value {
+	*p = val
+	return (*bytesBase64Value)(p)
+}
+
+func bytesBase64ValueConv(sval string) (interface{}, error) {
+
+	bin, err := base64.StdEncoding.DecodeString(sval)
+	if err == nil {
+		return bin, nil
+	}
+
+	return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err)
+}
+
+// GetBytesBase64 returns the []byte value of a flag with the given name
+func (f *FlagSet) GetBytesBase64(name string) ([]byte, error) {
+	val, err := f.getFlagType(name, "bytesBase64", bytesBase64ValueConv)
+
+	if err != nil {
+		return []byte{}, err
+	}
+
+	return val.([]byte), nil
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func (f *FlagSet) BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	f.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64Var defines an []byte flag with specified name, default value, and usage string.
+// The argument p points to an []byte variable in which to store the value of the flag.
+func BytesBase64Var(p *[]byte, name string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, "", usage)
+}
+
+// BytesBase64VarP is like BytesBase64Var, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64VarP(p *[]byte, name, shorthand string, value []byte, usage string) {
+	CommandLine.VarP(newBytesBase64Value(value, p), name, shorthand, usage)
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func (f *FlagSet) BytesBase64(name string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, "", value, usage)
+	return p
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	p := new([]byte)
+	f.BytesBase64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// BytesBase64 defines an []byte flag with specified name, default value, and usage string.
+// The return value is the address of an []byte variable that stores the value of the flag.
+func BytesBase64(name string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, "", value, usage)
+}
+
+// BytesBase64P is like BytesBase64, but accepts a shorthand letter that can be used after a single dash.
+func BytesBase64P(name, shorthand string, value []byte, usage string) *[]byte {
+	return CommandLine.BytesBase64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
new file mode 100644
index 0000000..aa126e4
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -0,0 +1,96 @@
+package pflag
+
+import "strconv"
+
+// -- count Value
+type countValue int
+
+func newCountValue(val int, p *int) *countValue {
+	*p = val
+	return (*countValue)(p)
+}
+
+func (i *countValue) Set(s string) error {
+	// "+1" means that no specific value was passed, so increment
+	if s == "+1" {
+		*i = countValue(*i + 1)
+		return nil
+	}
+	v, err := strconv.ParseInt(s, 0, 0)
+	*i = countValue(v)
+	return err
+}
+
+func (i *countValue) Type() string {
+	return "count"
+}
+
+func (i *countValue) String() string { return strconv.Itoa(int(*i)) }
+
+func countConv(sval string) (interface{}, error) {
+	i, err := strconv.Atoi(sval)
+	if err != nil {
+		return nil, err
+	}
+	return i, nil
+}
+
+// GetCount returns the int value of a flag with the given name
+func (f *FlagSet) GetCount(name string) (int, error) {
+	val, err := f.getFlagType(name, "count", countConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// CountVar defines a count flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) CountVar(p *int, name string, usage string) {
+	f.CountVarP(p, name, "", usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
+	flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
+	flag.NoOptDefVal = "+1"
+}
+
+// CountVar is like FlagSet.CountVar, but the flag is placed on the CommandLine instead of a given flag set
+func CountVar(p *int, name string, usage string) {
+	CommandLine.CountVar(p, name, usage)
+}
+
+// CountVarP is like CountVar, but takes a shorthand for the flag name.
+func CountVarP(p *int, name, shorthand string, usage string) {
+	CommandLine.CountVarP(p, name, shorthand, usage)
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func (f *FlagSet) Count(name string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, "", usage)
+	return p
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
+	p := new(int)
+	f.CountVarP(p, name, shorthand, usage)
+	return p
+}
+
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
+func Count(name string, usage string) *int {
+	return CommandLine.CountP(name, "", usage)
+}
+
+// CountP is like Count, but takes a shorthand for the flag name.
+func CountP(name, shorthand string, usage string) *int {
+	return CommandLine.CountP(name, shorthand, usage)
+}
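+
+// Illustrative usage sketch, assuming a hypothetical "verbose" flag: because a
+// count flag increments on every occurrence, passing -vvv on the command line
+// yields a value of 3.
+//
+//	verbosity := CountP("verbose", "v", "verbosity level")
+//	Parse()
+//	// *verbosity == 3 after parsing ["-vvv"]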
diff --git a/vendor/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go
new file mode 100644
index 0000000..e9debef
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration.go
@@ -0,0 +1,86 @@
+package pflag
+
+import (
+	"time"
+)
+
+// -- time.Duration Value
+type durationValue time.Duration
+
+func newDurationValue(val time.Duration, p *time.Duration) *durationValue {
+	*p = val
+	return (*durationValue)(p)
+}
+
+func (d *durationValue) Set(s string) error {
+	v, err := time.ParseDuration(s)
+	*d = durationValue(v)
+	return err
+}
+
+func (d *durationValue) Type() string {
+	return "duration"
+}
+
+func (d *durationValue) String() string { return (*time.Duration)(d).String() }
+
+func durationConv(sval string) (interface{}, error) {
+	return time.ParseDuration(sval)
+}
+
+// GetDuration returns the duration value of a flag with the given name
+func (f *FlagSet) GetDuration(name string) (time.Duration, error) {
+	val, err := f.getFlagType(name, "duration", durationConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(time.Duration), nil
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	f.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, "", usage)
+}
+
+// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) {
+	CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, "", value, usage)
+	return p
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	p := new(time.Duration)
+	f.DurationVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Duration defines a time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, "", value, usage)
+}
+
+// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash.
+func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration {
+	return CommandLine.DurationP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go
new file mode 100644
index 0000000..52c6b6d
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/duration_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strings"
+	"time"
+)
+
+// -- durationSlice Value
+type durationSliceValue struct {
+	value   *[]time.Duration
+	changed bool
+}
+
+func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue {
+	dsv := new(durationSliceValue)
+	dsv.value = p
+	*dsv.value = val
+	return dsv
+}
+
+func (s *durationSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return err
+		}
+
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *durationSliceValue) Type() string {
+	return "durationSlice"
+}
+
+func (s *durationSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%s", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func durationSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []time.Duration{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]time.Duration, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = time.ParseDuration(d)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+	return out, nil
+}
+
+// GetDurationSlice returns the []time.Duration value of a flag with the given name
+func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) {
+	val, err := f.getFlagType(name, "durationSlice", durationSliceConv)
+	if err != nil {
+		return []time.Duration{}, err
+	}
+	return val.([]time.Duration), nil
+}
+
+// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	f.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSliceVar defines a []time.Duration flag with specified name, default value, and usage string.
+// The argument p points to a []time.Duration variable in which to store the value of the flag.
+func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage)
+}
+
+// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) {
+	CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage)
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	p := []time.Duration{}
+	f.DurationSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string.
+// The return value is the address of a []time.Duration variable that stores the value of the flag.
+func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, "", value, usage)
+}
+
+// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash.
+func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration {
+	return CommandLine.DurationSliceP(name, shorthand, value, usage)
+}
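
As the Set method above shows, a duration slice flag parses a comma-separated list and appends further values when the flag is repeated. A small illustrative sketch under the same caveat (hypothetical flag name and default, not part of the vendored file):

	package main

	import (
		"fmt"
		"time"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// e.g. --retry-after=1s,5s,30s ; repeating the flag appends more intervals.
		retries := flag.DurationSlice("retry-after", []time.Duration{time.Second}, "retry back-off intervals")
		flag.Parse()
		fmt.Println("retry-after:", *retries)
	}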
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
new file mode 100644
index 0000000..9beeda8
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -0,0 +1,1227 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pflag is a drop-in replacement for Go's flag package, implementing
+POSIX/GNU-style --flags.
+
+pflag is compatible with the GNU extensions to the POSIX recommendations
+for command-line options. See
+http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html
+
+Usage:
+
+pflag is a drop-in replacement for Go's native flag package. If you import
+pflag under the name "flag" then all code should continue to function
+with no changes.
+
+	import flag "github.com/spf13/pflag"
+
+There is one exception to this: if you directly instantiate the Flag struct
+there is one more field "Shorthand" that you will need to set.
+Most code never instantiates this struct directly, and instead uses
+functions such as String(), BoolVar(), and Var(), and is therefore
+unaffected.
+
+Define flags using flag.String(), Bool(), Int(), etc.
+
+This declares an integer flag, -flagname, stored in the pointer ip, with type *int.
+	var ip = flag.Int("flagname", 1234, "help message for flagname")
+If you like, you can bind the flag to a variable using the Var() functions.
+	var flagvar int
+	func init() {
+		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
+	}
+Or you can create custom flags that satisfy the Value interface (with
+pointer receivers) and couple them to flag parsing by
+	flag.Var(&flagVal, "name", "help message for flagname")
+For such flags, the default value is just the initial value of the variable.
+
+After all flags are defined, call
+	flag.Parse()
+to parse the command line into the defined flags.
+
+Flags may then be used directly. If you're using the flags themselves,
+they are all pointers; if you bind to variables, they're values.
+	fmt.Println("ip has value ", *ip)
+	fmt.Println("flagvar has value ", flagvar)
+
+After parsing, the arguments after the flag are available as the
+slice flag.Args() or individually as flag.Arg(i).
+The arguments are indexed from 0 through flag.NArg()-1.
+
+The pflag package also defines some new functions that are not in flag,
+that give one-letter shorthands for flags. You can use these by appending
+'P' to the name of any function that defines a flag.
+	var ip = flag.IntP("flagname", "f", 1234, "help message")
+	var flagvar bool
+	func init() {
+		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
+	}
+	flag.VarP(&flagVal, "varname", "v", "help message")
+Shorthand letters can be used with single dashes on the command line.
+Boolean shorthand flags can be combined with other shorthand flags.
+
+Command line flag syntax:
+	--flag    // boolean flags only
+	--flag=x
+
+Unlike the flag package, a single dash before an option means something
+different than a double dash. Single dashes signify a series of shorthand
+letters for flags. All but the last shorthand letter must be boolean flags.
+	// boolean flags
+	-f
+	-abc
+	// non-boolean flags
+	-n 1234
+	-Ifile
+	// mixed
+	-abcs "hello"
+	-abcn1234
+
+Flag parsing stops after the terminator "--". Unlike the flag package,
+flags can be interspersed with arguments anywhere on the command line
+before this terminator.
+
+Integer flags accept 1234, 0664, 0x1234 and may be negative.
+Boolean flags (in their long form) accept 1, 0, t, f, true, false,
+TRUE, FALSE, True, False.
+Duration flags accept any input valid for time.ParseDuration.
+
+The default set of command-line flags is controlled by
+top-level functions.  The FlagSet type allows one to define
+independent sets of flags, such as to implement subcommands
+in a command-line interface. The methods of FlagSet are
+analogous to the top-level functions for the command-line
+flag set.
+*/
+package pflag
+
+import (
+	"bytes"
+	"errors"
+	goflag "flag"
+	"fmt"
+	"io"
+	"os"
+	"sort"
+	"strings"
+)
+
+// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
+var ErrHelp = errors.New("pflag: help requested")
+
+// ErrorHandling defines how to handle flag parsing errors.
+type ErrorHandling int
+
+const (
+	// ContinueOnError will return an err from Parse() if an error is found
+	ContinueOnError ErrorHandling = iota
+	// ExitOnError will call os.Exit(2) if an error is found when parsing
+	ExitOnError
+	// PanicOnError will panic() if an error is found when parsing flags
+	PanicOnError
+)
+
+// ParseErrorsWhitelist defines the parsing errors that can be ignored
+type ParseErrorsWhitelist struct {
+	// UnknownFlags will ignore unknown flag errors and continue parsing the rest of the flags
+	UnknownFlags bool
+}
+
+// NormalizedName is a flag name that has been normalized according to rules
+// for the FlagSet (e.g. making '-' and '_' equivalent).
+type NormalizedName string
+
+// A FlagSet represents a set of defined flags.
+type FlagSet struct {
+	// Usage is the function called when an error occurs while parsing flags.
+	// The field is a function (not a method) that may be changed to point to
+	// a custom error handler.
+	Usage func()
+
+	// SortFlags indicates whether flags should be sorted in
+	// help/usage messages.
+	SortFlags bool
+
+	// ParseErrorsWhitelist is used to configure a whitelist of errors
+	ParseErrorsWhitelist ParseErrorsWhitelist
+
+	name              string
+	parsed            bool
+	actual            map[NormalizedName]*Flag
+	orderedActual     []*Flag
+	sortedActual      []*Flag
+	formal            map[NormalizedName]*Flag
+	orderedFormal     []*Flag
+	sortedFormal      []*Flag
+	shorthands        map[byte]*Flag
+	args              []string // arguments after flags
+	argsLenAtDash     int      // len(args) when a '--' was located when parsing, or -1 if no --
+	errorHandling     ErrorHandling
+	output            io.Writer // nil means stderr; use out() accessor
+	interspersed      bool      // allow interspersed option/non-option args
+	normalizeNameFunc func(f *FlagSet, name string) NormalizedName
+
+	addedGoFlagSets []*goflag.FlagSet
+}
+
+// A Flag represents the state of a flag.
+type Flag struct {
+	Name                string              // name as it appears on command line
+	Shorthand           string              // one-letter abbreviated flag
+	Usage               string              // help message
+	Value               Value               // value as set
+	DefValue            string              // default value (as text); for usage message
+	Changed             bool                // true if the user set the value (false if left at the default)
+	NoOptDefVal         string              // default value (as text); if the flag is on the command line without any options
+	Deprecated          string              // If this flag is deprecated, this string is the message pointing to what to use instead
+	Hidden              bool                // used by cobra.Command to allow flags to be hidden from help/usage text
+	ShorthandDeprecated string              // If the shorthand of this flag is deprecated, this string is the message pointing to what to use instead
+	Annotations         map[string][]string // used by cobra.Command bash autocompletion code
+}
+
+// Value is the interface to the dynamic value stored in a flag.
+// (The default value is represented as a string.)
+type Value interface {
+	String() string
+	Set(string) error
+	Type() string
+}
+
+// sortFlags returns the flags as a slice in lexicographical sorted order.
+func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
+	list := make(sort.StringSlice, len(flags))
+	i := 0
+	for k := range flags {
+		list[i] = string(k)
+		i++
+	}
+	list.Sort()
+	result := make([]*Flag, len(list))
+	for i, name := range list {
+		result[i] = flags[NormalizedName(name)]
+	}
+	return result
+}
+
+// SetNormalizeFunc allows you to add a function which can translate flag names.
+// Flags added to the FlagSet will be translated, and any name later used to
+// look up a flag will also be translated. So it would be possible to create
+// a flag named "getURL" and have it translated to "geturl".  A user could then pass
+// "--getUrl" which may also be translated to "geturl" and everything will work.
+func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
+	f.normalizeNameFunc = n
+	f.sortedFormal = f.sortedFormal[:0]
+	for fname, flag := range f.formal {
+		nname := f.normalizeFlagName(flag.Name)
+		if fname == nname {
+			continue
+		}
+		flag.Name = string(nname)
+		delete(f.formal, fname)
+		f.formal[nname] = flag
+		if _, set := f.actual[fname]; set {
+			delete(f.actual, fname)
+			f.actual[nname] = flag
+		}
+	}
+}
+
+// GetNormalizeFunc returns the previously set NormalizeFunc, or a function which
+// does no translation if none was set.
+func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName {
+	if f.normalizeNameFunc != nil {
+		return f.normalizeNameFunc
+	}
+	return func(f *FlagSet, name string) NormalizedName { return NormalizedName(name) }
+}
+
+func (f *FlagSet) normalizeFlagName(name string) NormalizedName {
+	n := f.GetNormalizeFunc()
+	return n(f, name)
+}
+
+func (f *FlagSet) out() io.Writer {
+	if f.output == nil {
+		return os.Stderr
+	}
+	return f.output
+}
+
+// SetOutput sets the destination for usage and error messages.
+// If output is nil, os.Stderr is used.
+func (f *FlagSet) SetOutput(output io.Writer) {
+	f.output = output
+}
+
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func (f *FlagSet) VisitAll(fn func(*Flag)) {
+	if len(f.formal) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.formal) != len(f.sortedFormal) {
+			f.sortedFormal = sortFlags(f.formal)
+		}
+		flags = f.sortedFormal
+	} else {
+		flags = f.orderedFormal
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// HasFlags returns a bool to indicate if the FlagSet has any flags defined.
+func (f *FlagSet) HasFlags() bool {
+	return len(f.formal) > 0
+}
+
+// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags
+// that are not hidden.
+func (f *FlagSet) HasAvailableFlags() bool {
+	for _, flag := range f.formal {
+		if !flag.Hidden {
+			return true
+		}
+	}
+	return false
+}
+
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
+func VisitAll(fn func(*Flag)) {
+	CommandLine.VisitAll(fn)
+}
+
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func (f *FlagSet) Visit(fn func(*Flag)) {
+	if len(f.actual) == 0 {
+		return
+	}
+
+	var flags []*Flag
+	if f.SortFlags {
+		if len(f.actual) != len(f.sortedActual) {
+			f.sortedActual = sortFlags(f.actual)
+		}
+		flags = f.sortedActual
+	} else {
+		flags = f.orderedActual
+	}
+
+	for _, flag := range flags {
+		fn(flag)
+	}
+}
+
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
+func Visit(fn func(*Flag)) {
+	CommandLine.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) Lookup(name string) *Flag {
+	return f.lookup(f.normalizeFlagName(name))
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+// It panics if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+	if name == "" {
+		return nil
+	}
+	if len(name) > 1 {
+		msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+		fmt.Fprint(f.out(), msg)
+		panic(msg)
+	}
+	c := name[0]
+	return f.shorthands[c]
+}
+
+// lookup returns the Flag structure of the named flag, returning nil if none exists.
+func (f *FlagSet) lookup(name NormalizedName) *Flag {
+	return f.formal[name]
+}
+
+// func to return a given type for a given flag name
+func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) {
+	flag := f.Lookup(name)
+	if flag == nil {
+		err := fmt.Errorf("flag accessed but not defined: %s", name)
+		return nil, err
+	}
+
+	if flag.Value.Type() != ftype {
+		err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type())
+		return nil, err
+	}
+
+	sval := flag.Value.String()
+	result, err := convFunc(sval)
+	if err != nil {
+		return nil, err
+	}
+	return result, nil
+}
+
+// ArgsLenAtDash will return the length of f.Args at the moment when a -- was
+// found during arg parsing. This allows your program to know which args were
+// before the -- and which came after.
+func (f *FlagSet) ArgsLenAtDash() int {
+	return f.argsLenAtDash
+}
+
+// MarkDeprecated indicates that a flag is deprecated in your program. It will
+// continue to function but will not show up in help or usage messages. Using
+// this flag will also print the given usageMessage.
+func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.Deprecated = usageMessage
+	flag.Hidden = true
+	return nil
+}
+
+// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your
+// program. It will continue to function but will not show up in help or usage
+// messages. Using this flag will also print the given usageMessage.
+func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	if usageMessage == "" {
+		return fmt.Errorf("deprecated message for flag %q must be set", name)
+	}
+	flag.ShorthandDeprecated = usageMessage
+	return nil
+}
+
+// MarkHidden sets a flag to 'hidden' in your program. It will continue to
+// function but will not show up in help or usage messages.
+func (f *FlagSet) MarkHidden(name string) error {
+	flag := f.Lookup(name)
+	if flag == nil {
+		return fmt.Errorf("flag %q does not exist", name)
+	}
+	flag.Hidden = true
+	return nil
+}
+
+// Lookup returns the Flag structure of the named command-line flag,
+// returning nil if none exists.
+func Lookup(name string) *Flag {
+	return CommandLine.Lookup(name)
+}
+
+// ShorthandLookup returns the Flag structure of the shorthand flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+	return CommandLine.ShorthandLookup(name)
+}
+
+// Set sets the value of the named flag.
+func (f *FlagSet) Set(name, value string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+
+	err := flag.Value.Set(value)
+	if err != nil {
+		var flagName string
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			flagName = fmt.Sprintf("--%s", flag.Name)
+		}
+		return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
+	}
+
+	if !flag.Changed {
+		if f.actual == nil {
+			f.actual = make(map[NormalizedName]*Flag)
+		}
+		f.actual[normalName] = flag
+		f.orderedActual = append(f.orderedActual, flag)
+
+		flag.Changed = true
+	}
+
+	if flag.Deprecated != "" {
+		fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+	}
+	return nil
+}
+
+// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet.
+// This is sometimes used by spf13/cobra programs which want to generate additional
+// bash completion information.
+func (f *FlagSet) SetAnnotation(name, key string, values []string) error {
+	normalName := f.normalizeFlagName(name)
+	flag, ok := f.formal[normalName]
+	if !ok {
+		return fmt.Errorf("no such flag -%v", name)
+	}
+	if flag.Annotations == nil {
+		flag.Annotations = map[string][]string{}
+	}
+	flag.Annotations[key] = values
+	return nil
+}
+
+// Changed returns true if the flag was explicitly set during Parse() and false
+// otherwise
+func (f *FlagSet) Changed(name string) bool {
+	flag := f.Lookup(name)
+	// If a flag doesn't exist, it wasn't changed....
+	if flag == nil {
+		return false
+	}
+	return flag.Changed
+}
+
+// Set sets the value of the named command-line flag.
+func Set(name, value string) error {
+	return CommandLine.Set(name, value)
+}
+
+// PrintDefaults prints, to standard error unless configured
+// otherwise, the default values of all defined flags in the set.
+func (f *FlagSet) PrintDefaults() {
+	usages := f.FlagUsages()
+	fmt.Fprint(f.out(), usages)
+}
+
+// defaultIsZeroValue returns true if the default value for this flag represents
+// a zero value.
+func (f *Flag) defaultIsZeroValue() bool {
+	switch f.Value.(type) {
+	case boolFlag:
+		return f.DefValue == "false"
+	case *durationValue:
+		// Beginning in Go 1.7, duration zero values are "0s"
+		return f.DefValue == "0" || f.DefValue == "0s"
+	case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value:
+		return f.DefValue == "0"
+	case *stringValue:
+		return f.DefValue == ""
+	case *ipValue, *ipMaskValue, *ipNetValue:
+		return f.DefValue == "<nil>"
+	case *intSliceValue, *stringSliceValue, *stringArrayValue:
+		return f.DefValue == "[]"
+	default:
+		switch f.Value.String() {
+		case "false":
+			return true
+		case "<nil>":
+			return true
+		case "":
+			return true
+		case "0":
+			return true
+		}
+		return false
+	}
+}
+
+// UnquoteUsage extracts a back-quoted name from the usage
+// string for a flag and returns it and the un-quoted usage.
+// Given "a `name` to show" it returns ("name", "a name to show").
+// If there are no back quotes, the name is an educated guess of the
+// type of the flag's value, or the empty string if the flag is boolean.
+func UnquoteUsage(flag *Flag) (name string, usage string) {
+	// Look for a back-quoted name, but avoid the strings package.
+	usage = flag.Usage
+	for i := 0; i < len(usage); i++ {
+		if usage[i] == '`' {
+			for j := i + 1; j < len(usage); j++ {
+				if usage[j] == '`' {
+					name = usage[i+1 : j]
+					usage = usage[:i] + name + usage[j+1:]
+					return name, usage
+				}
+			}
+			break // Only one back quote; use type name.
+		}
+	}
+
+	name = flag.Value.Type()
+	switch name {
+	case "bool":
+		name = ""
+	case "float64":
+		name = "float"
+	case "int64":
+		name = "int"
+	case "uint64":
+		name = "uint"
+	case "stringSlice":
+		name = "strings"
+	case "intSlice":
+		name = "ints"
+	case "uintSlice":
+		name = "uints"
+	case "boolSlice":
+		name = "bools"
+	}
+
+	return
+}
+
+// Splits the string `s` on whitespace into an initial substring up to
+// `i` runes in length and the remainder. Will go `slop` over `i` if
+// that encompasses the entire string (which allows the caller to
+// avoid short orphan words on the final line).
+func wrapN(i, slop int, s string) (string, string) {
+	if i+slop > len(s) {
+		return s, ""
+	}
+
+	w := strings.LastIndexAny(s[:i], " \t\n")
+	if w <= 0 {
+		return s, ""
+	}
+	nlPos := strings.LastIndex(s[:i], "\n")
+	if nlPos > 0 && nlPos < w {
+		return s[:nlPos], s[nlPos+1:]
+	}
+	return s[:w], s[w+1:]
+}
+
+// Wraps the string `s` to a maximum width `w` with leading indent
+// `i`. The first line is not indented (this is assumed to be done by
+// caller). Pass `w` == 0 to do no wrapping
+func wrap(i, w int, s string) string {
+	if w == 0 {
+		return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	// space between indent i and end of line width w into which
+	// we should wrap the text.
+	wrap := w - i
+
+	var r, l string
+
+	// Not enough space for sensible wrapping. Wrap as a block on
+	// the next line instead.
+	if wrap < 24 {
+		i = 16
+		wrap = w - i
+		r += "\n" + strings.Repeat(" ", i)
+	}
+	// If still not enough space then don't even try to wrap.
+	if wrap < 24 {
+		return strings.Replace(s, "\n", r, -1)
+	}
+
+	// Try to avoid short orphan words on the final line, by
+	// allowing wrapN to go a bit over if that would fit in the
+	// remainder of the line.
+	slop := 5
+	wrap = wrap - slop
+
+	// Handle first line, which is indented by the caller (or the
+	// special case above)
+	l, s = wrapN(wrap, slop, s)
+	r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1)
+
+	// Now wrap the rest
+	for s != "" {
+		var t string
+
+		t, s = wrapN(wrap, slop, s)
+		r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1)
+	}
+
+	return r
+
+}
+
+// FlagUsagesWrapped returns a string containing the usage information
+// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
+// wrapping)
+func (f *FlagSet) FlagUsagesWrapped(cols int) string {
+	buf := new(bytes.Buffer)
+
+	lines := make([]string, 0, len(f.formal))
+
+	maxlen := 0
+	f.VisitAll(func(flag *Flag) {
+		if flag.Hidden {
+			return
+		}
+
+		line := ""
+		if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+			line = fmt.Sprintf("  -%s, --%s", flag.Shorthand, flag.Name)
+		} else {
+			line = fmt.Sprintf("      --%s", flag.Name)
+		}
+
+		varname, usage := UnquoteUsage(flag)
+		if varname != "" {
+			line += " " + varname
+		}
+		if flag.NoOptDefVal != "" {
+			switch flag.Value.Type() {
+			case "string":
+				line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
+			case "bool":
+				if flag.NoOptDefVal != "true" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			case "count":
+				if flag.NoOptDefVal != "+1" {
+					line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+				}
+			default:
+				line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+			}
+		}
+
+		// This special character will be replaced with spacing once the
+		// correct alignment is calculated
+		line += "\x00"
+		if len(line) > maxlen {
+			maxlen = len(line)
+		}
+
+		line += usage
+		if !flag.defaultIsZeroValue() {
+			if flag.Value.Type() == "string" {
+				line += fmt.Sprintf(" (default %q)", flag.DefValue)
+			} else {
+				line += fmt.Sprintf(" (default %s)", flag.DefValue)
+			}
+		}
+		if len(flag.Deprecated) != 0 {
+			line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated)
+		}
+
+		lines = append(lines, line)
+	})
+
+	for _, line := range lines {
+		sidx := strings.Index(line, "\x00")
+		spacing := strings.Repeat(" ", maxlen-sidx)
+		// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
+		fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+	}
+
+	return buf.String()
+}
+
+// FlagUsages returns a string containing the usage information for all flags in
+// the FlagSet
+func (f *FlagSet) FlagUsages() string {
+	return f.FlagUsagesWrapped(0)
+}
+
+// PrintDefaults prints to standard error the default values of all defined command-line flags.
+func PrintDefaults() {
+	CommandLine.PrintDefaults()
+}
+
+// defaultUsage is the default function to print a usage message.
+func defaultUsage(f *FlagSet) {
+	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
+	f.PrintDefaults()
+}
+
+// NOTE: Usage is not just defaultUsage(CommandLine)
+// because it serves (via godoc flag Usage) as the example
+// for how to write your own usage function.
+
+// Usage prints to standard error a usage message documenting all defined command-line flags.
+// The function is a variable that may be changed to point to a custom function.
+// By default it prints a simple header and calls PrintDefaults; for details about the
+// format of the output and how to control it, see the documentation for PrintDefaults.
+var Usage = func() {
+	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
+	PrintDefaults()
+}
+
+// NFlag returns the number of flags that have been set.
+func (f *FlagSet) NFlag() int { return len(f.actual) }
+
+// NFlag returns the number of command-line flags that have been set.
+func NFlag() int { return len(CommandLine.actual) }
+
+// Arg returns the i'th argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func (f *FlagSet) Arg(i int) string {
+	if i < 0 || i >= len(f.args) {
+		return ""
+	}
+	return f.args[i]
+}
+
+// Arg returns the i'th command-line argument.  Arg(0) is the first remaining argument
+// after flags have been processed.
+func Arg(i int) string {
+	return CommandLine.Arg(i)
+}
+
+// NArg is the number of arguments remaining after flags have been processed.
+func (f *FlagSet) NArg() int { return len(f.args) }
+
+// NArg is the number of arguments remaining after flags have been processed.
+func NArg() int { return len(CommandLine.args) }
+
+// Args returns the non-flag arguments.
+func (f *FlagSet) Args() []string { return f.args }
+
+// Args returns the non-flag command-line arguments.
+func Args() []string { return CommandLine.args }
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func (f *FlagSet) Var(value Value, name string, usage string) {
+	f.VarP(value, name, "", usage)
+}
+
+// VarPF is like VarP, but returns the flag created
+func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:      name,
+		Shorthand: shorthand,
+		Usage:     usage,
+		Value:     value,
+		DefValue:  value.String(),
+	}
+	f.AddFlag(flag)
+	return flag
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
+	f.VarPF(value, name, shorthand, usage)
+}
+
+// AddFlag will add the flag to the FlagSet
+func (f *FlagSet) AddFlag(flag *Flag) {
+	normalizedFlagName := f.normalizeFlagName(flag.Name)
+
+	_, alreadyThere := f.formal[normalizedFlagName]
+	if alreadyThere {
+		msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
+		fmt.Fprintln(f.out(), msg)
+		panic(msg) // Happens only if flags are declared with identical names
+	}
+	if f.formal == nil {
+		f.formal = make(map[NormalizedName]*Flag)
+	}
+
+	flag.Name = string(normalizedFlagName)
+	f.formal[normalizedFlagName] = flag
+	f.orderedFormal = append(f.orderedFormal, flag)
+
+	if flag.Shorthand == "" {
+		return
+	}
+	if len(flag.Shorthand) > 1 {
+		msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+		fmt.Fprint(f.out(), msg)
+		panic(msg)
+	}
+	if f.shorthands == nil {
+		f.shorthands = make(map[byte]*Flag)
+	}
+	c := flag.Shorthand[0]
+	used, alreadyThere := f.shorthands[c]
+	if alreadyThere {
+		msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+		fmt.Fprint(f.out(), msg)
+		panic(msg)
+	}
+	f.shorthands[c] = flag
+}
+
+// AddFlagSet adds one FlagSet to another. If a flag is already present in f
+// the flag from newSet will be ignored.
+func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(flag *Flag) {
+		if f.Lookup(flag.Name) == nil {
+			f.AddFlag(flag)
+		}
+	})
+}
+
+// Var defines a flag with the specified name and usage string. The type and
+// value of the flag are represented by the first argument, of type Value, which
+// typically holds a user-defined implementation of Value. For instance, the
+// caller could create a flag that turns a comma-separated string into a slice
+// of strings by giving the slice the methods of Value; in particular, Set would
+// decompose the comma-separated string into the slice.
+func Var(value Value, name string, usage string) {
+	CommandLine.VarP(value, name, "", usage)
+}
+
+// VarP is like Var, but accepts a shorthand letter that can be used after a single dash.
+func VarP(value Value, name, shorthand, usage string) {
+	CommandLine.VarP(value, name, shorthand, usage)
+}
+
+// failf prints to standard error a formatted error and usage message and
+// returns the error.
+func (f *FlagSet) failf(format string, a ...interface{}) error {
+	err := fmt.Errorf(format, a...)
+	if f.errorHandling != ContinueOnError {
+		fmt.Fprintln(f.out(), err)
+		f.usage()
+	}
+	return err
+}
+
+// usage calls the Usage method for the flag set, or the usage function if
+// the flag set is CommandLine.
+func (f *FlagSet) usage() {
+	if f == CommandLine {
+		Usage()
+	} else if f.Usage == nil {
+		defaultUsage(f)
+	} else {
+		f.Usage()
+	}
+}
+
+//--unknown (args will be empty)
+//--unknown --next-flag ... (args will be --next-flag ...)
+//--unknown arg ... (args will be arg ...)
+func stripUnknownFlagValue(args []string) []string {
+	if len(args) == 0 {
+		//--unknown
+		return args
+	}
+
+	first := args[0]
+	if len(first) > 0 && first[0] == '-' {
+		//--unknown --next-flag ...
+		return args
+	}
+
+	//--unknown arg ... (args will be arg ...)
+	if len(args) > 1 {
+		return args[1:]
+	}
+	return nil
+}
+
+func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	name := s[2:]
+	if len(name) == 0 || name[0] == '-' || name[0] == '=' {
+		err = f.failf("bad flag syntax: %s", s)
+		return
+	}
+
+	split := strings.SplitN(name, "=", 2)
+	name = split[0]
+	flag, exists := f.formal[f.normalizeFlagName(name)]
+
+	if !exists {
+		switch {
+		case name == "help":
+			f.usage()
+			return a, ErrHelp
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// --unknown=unknownval arg ...
+			// we do not want to lose arg in this case
+			if len(split) >= 2 {
+				return a, nil
+			}
+
+			return stripUnknownFlagValue(a), nil
+		default:
+			err = f.failf("unknown flag: --%s", name)
+			return
+		}
+	}
+
+	var value string
+	if len(split) == 2 {
+		// '--flag=arg'
+		value = split[1]
+	} else if flag.NoOptDefVal != "" {
+		// '--flag' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(a) > 0 {
+		// '--flag arg'
+		value = a[0]
+		a = a[1:]
+	} else {
+		// '--flag' (arg was required)
+		err = f.failf("flag needs an argument: %s", s)
+		return
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf(err.Error())
+	}
+	return
+}
+
+func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parseFunc) (outShorts string, outArgs []string, err error) {
+	outArgs = args
+
+	if strings.HasPrefix(shorthands, "test.") {
+		return
+	}
+
+	outShorts = shorthands[1:]
+	c := shorthands[0]
+
+	flag, exists := f.shorthands[c]
+	if !exists {
+		switch {
+		case c == 'h':
+			f.usage()
+			err = ErrHelp
+			return
+		case f.ParseErrorsWhitelist.UnknownFlags:
+			// '-f=arg arg ...'
+			// we do not want to lose arg in this case
+			if len(shorthands) > 2 && shorthands[1] == '=' {
+				outShorts = ""
+				return
+			}
+
+			outArgs = stripUnknownFlagValue(outArgs)
+			return
+		default:
+			err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
+			return
+		}
+	}
+
+	var value string
+	if len(shorthands) > 2 && shorthands[1] == '=' {
+		// '-f=arg'
+		value = shorthands[2:]
+		outShorts = ""
+	} else if flag.NoOptDefVal != "" {
+		// '-f' (arg was optional)
+		value = flag.NoOptDefVal
+	} else if len(shorthands) > 1 {
+		// '-farg'
+		value = shorthands[1:]
+		outShorts = ""
+	} else if len(args) > 0 {
+		// '-f arg'
+		value = args[0]
+		outArgs = args[1:]
+	} else {
+		// '-f' (arg was required)
+		err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
+		return
+	}
+
+	if flag.ShorthandDeprecated != "" {
+		fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+	}
+
+	err = fn(flag, value)
+	if err != nil {
+		f.failf(err.Error())
+	}
+	return
+}
+
+func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []string, err error) {
+	a = args
+	shorthands := s[1:]
+
+	// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
+	for len(shorthands) > 0 {
+		shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
+	for len(args) > 0 {
+		s := args[0]
+		args = args[1:]
+		if len(s) == 0 || s[0] != '-' || len(s) == 1 {
+			if !f.interspersed {
+				f.args = append(f.args, s)
+				f.args = append(f.args, args...)
+				return nil
+			}
+			f.args = append(f.args, s)
+			continue
+		}
+
+		if s[1] == '-' {
+			if len(s) == 2 { // "--" terminates the flags
+				f.argsLenAtDash = len(f.args)
+				f.args = append(f.args, args...)
+				break
+			}
+			args, err = f.parseLongArg(s, args, fn)
+		} else {
+			args, err = f.parseShortArg(s, args, fn)
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// Parse parses flag definitions from the argument list, which should not
+// include the command name.  Must be called after all flags in the FlagSet
+// are defined and before flags are accessed by the program.
+// The return value will be ErrHelp if -help was set but not defined.
+func (f *FlagSet) Parse(arguments []string) error {
+	if f.addedGoFlagSets != nil {
+		for _, goFlagSet := range f.addedGoFlagSets {
+			goFlagSet.Parse(nil)
+		}
+	}
+	f.parsed = true
+
+	if len(arguments) == 0 {
+		return nil
+	}
+
+	f.args = make([]string, 0, len(arguments))
+
+	set := func(flag *Flag, value string) error {
+		return f.Set(flag.Name, value)
+	}
+
+	err := f.parseArgs(arguments, set)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			fmt.Println(err)
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+type parseFunc func(flag *Flag, value string) error
+
+// ParseAll parses flag definitions from the argument list, which should not
+// include the command name. The arguments for fn are flag and value. Must be
+// called after all flags in the FlagSet are defined and before flags are
+// accessed by the program. The return value will be ErrHelp if -help was set
+// but not defined.
+func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string) error) error {
+	f.parsed = true
+	f.args = make([]string, 0, len(arguments))
+
+	err := f.parseArgs(arguments, fn)
+	if err != nil {
+		switch f.errorHandling {
+		case ContinueOnError:
+			return err
+		case ExitOnError:
+			os.Exit(2)
+		case PanicOnError:
+			panic(err)
+		}
+	}
+	return nil
+}
+
+// Parsed reports whether f.Parse has been called.
+func (f *FlagSet) Parsed() bool {
+	return f.parsed
+}
+
+// Parse parses the command-line flags from os.Args[1:].  Must be called
+// after all flags are defined and before flags are accessed by the program.
+func Parse() {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.Parse(os.Args[1:])
+}
+
+// ParseAll parses the command-line flags from os.Args[1:] and calls fn for each.
+// The arguments for fn are flag and value. Must be called after all flags are
+// defined and before flags are accessed by the program.
+func ParseAll(fn func(flag *Flag, value string) error) {
+	// Ignore errors; CommandLine is set for ExitOnError.
+	CommandLine.ParseAll(os.Args[1:], fn)
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func SetInterspersed(interspersed bool) {
+	CommandLine.SetInterspersed(interspersed)
+}
+
+// Parsed returns true if the command-line flags have been parsed.
+func Parsed() bool {
+	return CommandLine.Parsed()
+}
+
+// CommandLine is the default set of command-line flags, parsed from os.Args.
+var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
+
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
+	f := &FlagSet{
+		name:          name,
+		errorHandling: errorHandling,
+		argsLenAtDash: -1,
+		interspersed:  true,
+		SortFlags:     true,
+	}
+	return f
+}
+
+// SetInterspersed sets whether to support interspersed option/non-option arguments.
+func (f *FlagSet) SetInterspersed(interspersed bool) {
+	f.interspersed = interspersed
+}
+
+// Init sets the name and error handling property for a flag set.
+// By default, the zero FlagSet uses an empty name and the
+// ContinueOnError error handling policy.
+func (f *FlagSet) Init(name string, errorHandling ErrorHandling) {
+	f.name = name
+	f.errorHandling = errorHandling
+	f.argsLenAtDash = -1
+}
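
The package comment and the FlagSet methods above describe the overall flow: define flags (optionally with shorthands), optionally install a normalization function, then call Parse. The sketch below exercises that flow with made-up flag names and arguments; it assumes the usual Bool/String helpers defined elsewhere in the package and is an illustration, not part of the library:

	package main

	import (
		"fmt"
		"strings"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("example", flag.ContinueOnError)

		// Treat '_' and '-' as equivalent in flag names, as described for SetNormalizeFunc.
		fs.SetNormalizeFunc(func(f *flag.FlagSet, name string) flag.NormalizedName {
			return flag.NormalizedName(strings.Replace(name, "_", "-", -1))
		})

		verbose := fs.BoolP("verbose", "v", false, "enable verbose output")
		name := fs.String("user-name", "anonymous", "user name to greet")

		// Flags and positional arguments may be interspersed; "--" ends flag parsing.
		args := []string{"greet", "--user_name=alice", "-v", "--", "-literal-arg"}
		if err := fs.Parse(args); err != nil {
			fmt.Println("parse error:", err)
			return
		}

		fmt.Println("verbose:", *verbose)          // true
		fmt.Println("name:", *name)                // alice
		fmt.Println("positional args:", fs.Args()) // [greet -literal-arg]
		fmt.Println("after --:", fs.Args()[fs.ArgsLenAtDash():])
	}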
diff --git a/vendor/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go
new file mode 100644
index 0000000..a243f81
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- float32 Value
+type float32Value float32
+
+func newFloat32Value(val float32, p *float32) *float32Value {
+	*p = val
+	return (*float32Value)(p)
+}
+
+func (f *float32Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	*f = float32Value(v)
+	return err
+}
+
+func (f *float32Value) Type() string {
+	return "float32"
+}
+
+func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) }
+
+func float32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseFloat(sval, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(v), nil
+}
+
+// GetFloat32 returns the float32 value of a flag with the given name
+func (f *FlagSet) GetFloat32(name string) (float32, error) {
+	val, err := f.getFlagType(name, "float32", float32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float32), nil
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	f.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32Var defines a float32 flag with specified name, default value, and usage string.
+// The argument p points to a float32 variable in which to store the value of the flag.
+func Float32Var(p *float32, name string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, "", usage)
+}
+
+// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash.
+func Float32VarP(p *float32, name, shorthand string, value float32, usage string) {
+	CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage)
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func (f *FlagSet) Float32(name string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 {
+	p := new(float32)
+	f.Float32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float32 defines a float32 flag with specified name, default value, and usage string.
+// The return value is the address of a float32 variable that stores the value of the flag.
+func Float32(name string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, "", value, usage)
+}
+
+// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash.
+func Float32P(name, shorthand string, value float32, usage string) *float32 {
+	return CommandLine.Float32P(name, shorthand, value, usage)
+}
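
GetFloat32 above is one instance of the typed-accessor pattern built on getFlagType in flag.go; the float64, int, int16, int32, and int64 files that follow repeat the same shape. A brief sketch of how such an accessor is typically used (flag name and value are illustrative, not part of the vendored file):

	package main

	import (
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		fs := flag.NewFlagSet("ratios", flag.ContinueOnError)
		fs.Float32("scale", 1.0, "scaling factor")

		if err := fs.Parse([]string{"--scale=2.5"}); err != nil {
			fmt.Println("parse error:", err)
			return
		}

		// GetFloat32 goes through getFlagType, so it returns an error (rather than
		// panicking) if the flag is undefined or registered with a different type.
		scale, err := fs.GetFloat32("scale")
		if err != nil {
			fmt.Println("lookup error:", err)
			return
		}
		fmt.Println("scale:", scale) // 2.5
	}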
diff --git a/vendor/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go
new file mode 100644
index 0000000..04b5492
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/float64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- float64 Value
+type float64Value float64
+
+func newFloat64Value(val float64, p *float64) *float64Value {
+	*p = val
+	return (*float64Value)(p)
+}
+
+func (f *float64Value) Set(s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	*f = float64Value(v)
+	return err
+}
+
+func (f *float64Value) Type() string {
+	return "float64"
+}
+
+func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) }
+
+func float64Conv(sval string) (interface{}, error) {
+	return strconv.ParseFloat(sval, 64)
+}
+
+// GetFloat64 returns the float64 value of a flag with the given name
+func (f *FlagSet) GetFloat64(name string) (float64, error) {
+	val, err := f.getFlagType(name, "float64", float64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(float64), nil
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	f.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default value, and usage string.
+// The argument p points to a float64 variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, "", usage)
+}
+
+// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash.
+func Float64VarP(p *float64, name, shorthand string, value float64, usage string) {
+	CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func (f *FlagSet) Float64(name string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 {
+	p := new(float64)
+	f.Float64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Float64 defines a float64 flag with specified name, default value, and usage string.
+// The return value is the address of a float64 variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, "", value, usage)
+}
+
+// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash.
+func Float64P(name, shorthand string, value float64, usage string) *float64 {
+	return CommandLine.Float64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go
new file mode 100644
index 0000000..d3dd72b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/golangflag.go
@@ -0,0 +1,105 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag
+
+import (
+	goflag "flag"
+	"reflect"
+	"strings"
+)
+
+// flagValueWrapper implements pflag.Value around a flag.Value.  The main
+// difference here is the addition of the Type method that returns a string
+// name of the type.  As this is generally unknown, we approximate that with
+// reflection.
+type flagValueWrapper struct {
+	inner    goflag.Value
+	flagType string
+}
+
+// We are just copying the boolFlag interface out of goflag as that is what
+// they use to decide if a flag should get "true" when no arg is given.
+type goBoolFlag interface {
+	goflag.Value
+	IsBoolFlag() bool
+}
+
+func wrapFlagValue(v goflag.Value) Value {
+	// If the flag.Value happens to also be a pflag.Value, just use it directly.
+	if pv, ok := v.(Value); ok {
+		return pv
+	}
+
+	pv := &flagValueWrapper{
+		inner: v,
+	}
+
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+
+	pv.flagType = strings.TrimSuffix(t.Name(), "Value")
+	return pv
+}
+
+func (v *flagValueWrapper) String() string {
+	return v.inner.String()
+}
+
+func (v *flagValueWrapper) Set(s string) error {
+	return v.inner.Set(s)
+}
+
+func (v *flagValueWrapper) Type() string {
+	return v.flagType
+}
+
+// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag
+// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessible
+// with both `-v` and `--v` in flags. If the golang flag was more than a single
+// character (ex: `verbose`) it will only be accessible via `--verbose`
+func PFlagFromGoFlag(goflag *goflag.Flag) *Flag {
+	// Remember the default value as a string; it won't change.
+	flag := &Flag{
+		Name:  goflag.Name,
+		Usage: goflag.Usage,
+		Value: wrapFlagValue(goflag.Value),
+		// Looks like golang flags don't set DefValue correctly  :-(
+		//DefValue: goflag.DefValue,
+		DefValue: goflag.Value.String(),
+	}
+	// Ex: if the golang flag was -v, allow both -v and --v to work
+	if len(flag.Name) == 1 {
+		flag.Shorthand = flag.Name
+	}
+	if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() {
+		flag.NoOptDefVal = "true"
+	}
+	return flag
+}
+
+// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet
+func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) {
+	if f.Lookup(goflag.Name) != nil {
+		return
+	}
+	newflag := PFlagFromGoFlag(goflag)
+	f.AddFlag(newflag)
+}
+
+// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet
+func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) {
+	if newSet == nil {
+		return
+	}
+	newSet.VisitAll(func(goflag *goflag.Flag) {
+		f.AddGoFlag(goflag)
+	})
+	if f.addedGoFlagSets == nil {
+		f.addedGoFlagSets = make([]*goflag.FlagSet, 0)
+	}
+	f.addedGoFlagSets = append(f.addedGoFlagSets, newSet)
+}
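
AddGoFlagSet and PFlagFromGoFlag above bridge flags registered with the standard library's flag package into a pflag FlagSet, giving single-letter names a shorthand and preserving the "no argument" behaviour of bool flags. A minimal sketch (the flag name is illustrative, not part of the vendored file):

	package main

	import (
		goflag "flag"
		"fmt"

		flag "github.com/spf13/pflag"
	)

	func main() {
		// A flag defined with the standard library...
		legacy := goflag.Bool("legacy", false, "flag registered on the standard library's CommandLine")

		// ...is folded into pflag's CommandLine so it can be set with GNU-style syntax.
		flag.CommandLine.AddGoFlagSet(goflag.CommandLine)

		if err := flag.CommandLine.Parse([]string{"--legacy"}); err != nil {
			fmt.Println("parse error:", err)
			return
		}
		fmt.Println("legacy:", *legacy) // true
	}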
diff --git a/vendor/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go
new file mode 100644
index 0000000..1474b89
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int Value
+type intValue int
+
+func newIntValue(val int, p *int) *intValue {
+	*p = val
+	return (*intValue)(p)
+}
+
+func (i *intValue) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = intValue(v)
+	return err
+}
+
+func (i *intValue) Type() string {
+	return "int"
+}
+
+func (i *intValue) String() string { return strconv.Itoa(int(*i)) }
+
+func intConv(sval string) (interface{}, error) {
+	return strconv.Atoi(sval)
+}
+
+// GetInt returns the int value of a flag with the given name
+func (f *FlagSet) GetInt(name string) (int, error) {
+	val, err := f.getFlagType(name, "int", intConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int), nil
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
+	f.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and usage string.
+// The argument p points to an int variable in which to store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, "", usage)
+}
+
+// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
+func IntVarP(p *int, name, shorthand string, value int, usage string) {
+	CommandLine.VarP(newIntValue(value, p), name, shorthand, usage)
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func (f *FlagSet) Int(name string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, "", value, usage)
+	return p
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
+	p := new(int)
+	f.IntVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int defines an int flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+	return CommandLine.IntP(name, "", value, usage)
+}
+
+// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
+func IntP(name, shorthand string, value int, usage string) *int {
+	return CommandLine.IntP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+	*p = val
+	return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 16)
+	*i = int16Value(v)
+	return err
+}
+
+func (i *int16Value) Type() string {
+	return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+	val, err := f.getFlagType(name, "int16", int16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+	CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+	p := new(int16)
+	f.Int16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+	return CommandLine.Int16P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go
new file mode 100644
index 0000000..9b95944
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int32 Value
+type int32Value int32
+
+func newInt32Value(val int32, p *int32) *int32Value {
+	*p = val
+	return (*int32Value)(p)
+}
+
+func (i *int32Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 32)
+	*i = int32Value(v)
+	return err
+}
+
+func (i *int32Value) Type() string {
+	return "int32"
+}
+
+func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(v), nil
+}
+
+// GetInt32 returns the int32 value of a flag with the given name
+func (f *FlagSet) GetInt32(name string) (int32, error) {
+	val, err := f.getFlagType(name, "int32", int32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int32), nil
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	f.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32Var defines an int32 flag with specified name, default value, and usage string.
+// The argument p points to an int32 variable in which to store the value of the flag.
+func Int32Var(p *int32, name string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, "", usage)
+}
+
+// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash.
+func Int32VarP(p *int32, name, shorthand string, value int32, usage string) {
+	CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage)
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func (f *FlagSet) Int32(name string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 {
+	p := new(int32)
+	f.Int32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int32 defines an int32 flag with specified name, default value, and usage string.
+// The return value is the address of an int32 variable that stores the value of the flag.
+func Int32(name string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, "", value, usage)
+}
+
+// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash.
+func Int32P(name, shorthand string, value int32, usage string) *int32 {
+	return CommandLine.Int32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go
new file mode 100644
index 0000000..0026d78
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int64.go
@@ -0,0 +1,84 @@
+package pflag
+
+import "strconv"
+
+// -- int64 Value
+type int64Value int64
+
+func newInt64Value(val int64, p *int64) *int64Value {
+	*p = val
+	return (*int64Value)(p)
+}
+
+func (i *int64Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 64)
+	*i = int64Value(v)
+	return err
+}
+
+func (i *int64Value) Type() string {
+	return "int64"
+}
+
+func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int64Conv(sval string) (interface{}, error) {
+	return strconv.ParseInt(sval, 0, 64)
+}
+
+// GetInt64 returns the int64 value of a flag with the given name
+func (f *FlagSet) GetInt64(name string) (int64, error) {
+	val, err := f.getFlagType(name, "int64", int64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int64), nil
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	f.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value, and usage string.
+// The argument p points to an int64 variable in which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, "", usage)
+}
+
+// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
+func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
+	CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
+	p := new(int64)
+	f.Int64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int64 defines an int64 flag with specified name, default value, and usage string.
+// The return value is the address of an int64 variable that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, "", value, usage)
+}
+
+// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
+func Int64P(name, shorthand string, value int64, usage string) *int64 {
+	return CommandLine.Int64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go
new file mode 100644
index 0000000..4da9222
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int8 Value
+type int8Value int8
+
+func newInt8Value(val int8, p *int8) *int8Value {
+	*p = val
+	return (*int8Value)(p)
+}
+
+func (i *int8Value) Set(s string) error {
+	v, err := strconv.ParseInt(s, 0, 8)
+	*i = int8Value(v)
+	return err
+}
+
+func (i *int8Value) Type() string {
+	return "int8"
+}
+
+func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseInt(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return int8(v), nil
+}
+
+// GetInt8 returns the int8 value of a flag with the given name
+func (f *FlagSet) GetInt8(name string) (int8, error) {
+	val, err := f.getFlagType(name, "int8", int8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(int8), nil
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	f.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8Var defines an int8 flag with specified name, default value, and usage string.
+// The argument p points to an int8 variable in which to store the value of the flag.
+func Int8Var(p *int8, name string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, "", usage)
+}
+
+// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash.
+func Int8VarP(p *int8, name, shorthand string, value int8, usage string) {
+	CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage)
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func (f *FlagSet) Int8(name string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 {
+	p := new(int8)
+	f.Int8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Int8 defines an int8 flag with specified name, default value, and usage string.
+// The return value is the address of an int8 variable that stores the value of the flag.
+func Int8(name string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, "", value, usage)
+}
+
+// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash.
+func Int8P(name, shorthand string, value int8, usage string) *int8 {
+	return CommandLine.Int8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go
new file mode 100644
index 0000000..1e7c9ed
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int_slice.go
@@ -0,0 +1,128 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- intSlice Value
+type intSliceValue struct {
+	value   *[]int
+	changed bool
+}
+
+func newIntSliceValue(val []int, p *[]int) *intSliceValue {
+	isv := new(intSliceValue)
+	isv.value = p
+	*isv.value = val
+	return isv
+}
+
+func (s *intSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return err
+		}
+
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *intSliceValue) Type() string {
+	return "intSlice"
+}
+
+func (s *intSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func intSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []int{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]int, len(ss))
+	for i, d := range ss {
+		var err error
+		out[i], err = strconv.Atoi(d)
+		if err != nil {
+			return nil, err
+		}
+
+	}
+	return out, nil
+}
+
+// GetIntSlice returns the []int value of a flag with the given name
+func (f *FlagSet) GetIntSlice(name string) ([]int, error) {
+	val, err := f.getFlagType(name, "intSlice", intSliceConv)
+	if err != nil {
+		return []int{}, err
+	}
+	return val.([]int), nil
+}
+
+// IntSliceVar defines an intSlice flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	f.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSliceVar defines a []int flag with specified name, default value, and usage string.
+// The argument p points to a []int variable in which to store the value of the flag.
+func IntSliceVar(p *[]int, name string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, "", usage)
+}
+
+// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) {
+	CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage)
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	p := []int{}
+	f.IntSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IntSlice defines a []int flag with specified name, default value, and usage string.
+// The return value is the address of a []int variable that stores the value of the flag.
+func IntSlice(name string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, "", value, usage)
+}
+
+// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash.
+func IntSliceP(name, shorthand string, value []int, usage string) *[]int {
+	return CommandLine.IntSliceP(name, shorthand, value, usage)
+}
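
For reference, a minimal usage sketch of the intSlice flag defined above: each argument is split on commas, and repeated flags append to the accumulated slice. The flag set, flag name, and values below are illustrative assumptions, not part of the vendored pflag source.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Example-only flag set and flag name; not defined by voltctl or pflag itself.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	ports := fs.IntSliceP("port", "p", []int{8080}, "ports to listen on")
	// The first occurrence replaces the default; later occurrences append.
	_ = fs.Parse([]string{"--port=80,443", "-p", "8443"})
	fmt.Println(*ports) // [80 443 8443]
}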
diff --git a/vendor/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go
new file mode 100644
index 0000000..3d414ba
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip.go
@@ -0,0 +1,94 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// -- net.IP value
+type ipValue net.IP
+
+func newIPValue(val net.IP, p *net.IP) *ipValue {
+	*p = val
+	return (*ipValue)(p)
+}
+
+func (i *ipValue) String() string { return net.IP(*i).String() }
+func (i *ipValue) Set(s string) error {
+	ip := net.ParseIP(strings.TrimSpace(s))
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP: %q", s)
+	}
+	*i = ipValue(ip)
+	return nil
+}
+
+func (i *ipValue) Type() string {
+	return "ip"
+}
+
+func ipConv(sval string) (interface{}, error) {
+	ip := net.ParseIP(sval)
+	if ip != nil {
+		return ip, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+}
+
+// GetIP returns the net.IP value of a flag with the given name
+func (f *FlagSet) GetIP(name string) (net.IP, error) {
+	val, err := f.getFlagType(name, "ip", ipConv)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IP), nil
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	f.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IPVar defines a net.IP flag with specified name, default value, and usage string.
+// The argument p points to a net.IP variable in which to store the value of the flag.
+func IPVar(p *net.IP, name string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, "", usage)
+}
+
+// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash.
+func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) {
+	CommandLine.VarP(newIPValue(value, p), name, shorthand, usage)
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	p := new(net.IP)
+	f.IPVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IP defines a net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a net.IP variable that stores the value of the flag.
+func IP(name string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, "", value, usage)
+}
+
+// IPP is like IP, but accepts a shorthand letter that can be used after a single dash.
+func IPP(name, shorthand string, value net.IP, usage string) *net.IP {
+	return CommandLine.IPP(name, shorthand, value, usage)
+}
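
A short usage sketch of the ip flag defined above; the flag set and flag name are hypothetical, not part of the vendored source.

package main

import (
	"fmt"
	"net"

	flag "github.com/spf13/pflag"
)

func main() {
	// Example-only flag set and flag name.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	bind := fs.IP("bind", net.ParseIP("127.0.0.1"), "address to bind to")
	if err := fs.Parse([]string{"--bind", "10.0.0.1"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(bind.String()) // 10.0.0.1
}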
diff --git a/vendor/github.com/spf13/pflag/ip_slice.go b/vendor/github.com/spf13/pflag/ip_slice.go
new file mode 100644
index 0000000..7dd196f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ip_slice.go
@@ -0,0 +1,148 @@
+package pflag
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"strings"
+)
+
+// -- ipSlice Value
+type ipSliceValue struct {
+	value   *[]net.IP
+	changed bool
+}
+
+func newIPSliceValue(val []net.IP, p *[]net.IP) *ipSliceValue {
+	ipsv := new(ipSliceValue)
+	ipsv.value = p
+	*ipsv.value = val
+	return ipsv
+}
+
+// Set converts, and assigns, the comma-separated IP argument string representation as the []net.IP value of this flag.
+// If Set is called on a flag that already has a []net.IP assigned, the newly converted values will be appended.
+func (s *ipSliceValue) Set(val string) error {
+
+	// remove all quote characters
+	rmQuote := strings.NewReplacer(`"`, "", `'`, "", "`", "")
+
+	// read flag arguments with CSV parser
+	ipStrSlice, err := readAsCSV(rmQuote.Replace(val))
+	if err != nil && err != io.EOF {
+		return err
+	}
+
+	// parse ip values into slice
+	out := make([]net.IP, 0, len(ipStrSlice))
+	for _, ipStr := range ipStrSlice {
+		ip := net.ParseIP(strings.TrimSpace(ipStr))
+		if ip == nil {
+			return fmt.Errorf("invalid string being converted to IP address: %s", ipStr)
+		}
+		out = append(out, ip)
+	}
+
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+
+	s.changed = true
+
+	return nil
+}
+
+// Type returns a string that uniquely represents this flag's type.
+func (s *ipSliceValue) Type() string {
+	return "ipSlice"
+}
+
+// String defines a "native" format for this net.IP slice flag value.
+func (s *ipSliceValue) String() string {
+
+	ipStrSlice := make([]string, len(*s.value))
+	for i, ip := range *s.value {
+		ipStrSlice[i] = ip.String()
+	}
+
+	out, _ := writeAsCSV(ipStrSlice)
+
+	return "[" + out + "]"
+}
+
+func ipSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []net.IP{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]net.IP, len(ss))
+	for i, sval := range ss {
+		ip := net.ParseIP(strings.TrimSpace(sval))
+		if ip == nil {
+			return nil, fmt.Errorf("invalid string being converted to IP address: %s", sval)
+		}
+		out[i] = ip
+	}
+	return out, nil
+}
+
+// GetIPSlice returns the []net.IP value of a flag with the given name
+func (f *FlagSet) GetIPSlice(name string) ([]net.IP, error) {
+	val, err := f.getFlagType(name, "ipSlice", ipSliceConv)
+	if err != nil {
+		return []net.IP{}, err
+	}
+	return val.([]net.IP), nil
+}
+
+// IPSliceVar defines an ipSlice flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func (f *FlagSet) IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	f.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSliceVar defines a []net.IP flag with specified name, default value, and usage string.
+// The argument p points to a []net.IP variable in which to store the value of the flag.
+func IPSliceVar(p *[]net.IP, name string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, "", usage)
+}
+
+// IPSliceVarP is like IPSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceVarP(p *[]net.IP, name, shorthand string, value []net.IP, usage string) {
+	CommandLine.VarP(newIPSliceValue(value, p), name, shorthand, usage)
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of that flag.
+func (f *FlagSet) IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	p := []net.IP{}
+	f.IPSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// IPSlice defines a []net.IP flag with specified name, default value, and usage string.
+// The return value is the address of a []net.IP variable that stores the value of the flag.
+func IPSlice(name string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, "", value, usage)
+}
+
+// IPSliceP is like IPSlice, but accepts a shorthand letter that can be used after a single dash.
+func IPSliceP(name, shorthand string, value []net.IP, usage string) *[]net.IP {
+	return CommandLine.IPSliceP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go
new file mode 100644
index 0000000..5bd44bd
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipmask.go
@@ -0,0 +1,122 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strconv"
+)
+
+// -- net.IPMask value
+type ipMaskValue net.IPMask
+
+func newIPMaskValue(val net.IPMask, p *net.IPMask) *ipMaskValue {
+	*p = val
+	return (*ipMaskValue)(p)
+}
+
+func (i *ipMaskValue) String() string { return net.IPMask(*i).String() }
+func (i *ipMaskValue) Set(s string) error {
+	ip := ParseIPv4Mask(s)
+	if ip == nil {
+		return fmt.Errorf("failed to parse IP mask: %q", s)
+	}
+	*i = ipMaskValue(ip)
+	return nil
+}
+
+func (i *ipMaskValue) Type() string {
+	return "ipMask"
+}
+
+// ParseIPv4Mask parses an IPv4 netmask written in IP form (e.g. 255.255.255.0) or in the hex form produced by net.IPMask.String() (e.g. ffffff00).
+// This function should really belong to the net package.
+func ParseIPv4Mask(s string) net.IPMask {
+	mask := net.ParseIP(s)
+	if mask == nil {
+		if len(s) != 8 {
+			return nil
+		}
+		// net.IPMask.String() actually outputs things like ffffff00
+		// so write a horrible parser for that as well  :-(
+		m := []int{}
+		for i := 0; i < 4; i++ {
+			b := "0x" + s[2*i:2*i+2]
+			d, err := strconv.ParseInt(b, 0, 0)
+			if err != nil {
+				return nil
+			}
+			m = append(m, int(d))
+		}
+		s := fmt.Sprintf("%d.%d.%d.%d", m[0], m[1], m[2], m[3])
+		mask = net.ParseIP(s)
+		if mask == nil {
+			return nil
+		}
+	}
+	return net.IPv4Mask(mask[12], mask[13], mask[14], mask[15])
+}
+
+func parseIPv4Mask(sval string) (interface{}, error) {
+	mask := ParseIPv4Mask(sval)
+	if mask == nil {
+		return nil, fmt.Errorf("unable to parse %s as net.IPMask", sval)
+	}
+	return mask, nil
+}
+
+// GetIPv4Mask returns the net.IPMask value of a flag with the given name
+func (f *FlagSet) GetIPv4Mask(name string) (net.IPMask, error) {
+	val, err := f.getFlagType(name, "ipMask", parseIPv4Mask)
+	if err != nil {
+		return nil, err
+	}
+	return val.(net.IPMask), nil
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	f.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMaskVar defines a net.IPMask flag with specified name, default value, and usage string.
+// The argument p points to a net.IPMask variable in which to store the value of the flag.
+func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, "", usage)
+}
+
+// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) {
+	CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage)
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	p := new(net.IPMask)
+	f.IPMaskVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPMask defines a net.IPMask flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPMask variable that stores the value of the flag.
+func IPMask(name string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, "", value, usage)
+}
+
+// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash.
+func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask {
+	return CommandLine.IPMaskP(name, shorthand, value, usage)
+}
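
ParseIPv4Mask above accepts both the dotted-quad and the hex spelling of a mask. A brief sketch of the two forms; the package alias and values are illustrative only.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Dotted-quad form, as a mask is usually written by hand.
	m1 := flag.ParseIPv4Mask("255.255.255.0")
	// Hex form, as produced by net.IPMask.String().
	m2 := flag.ParseIPv4Mask("ffffff00")
	fmt.Println(m1.String(), m2.String()) // ffffff00 ffffff00
}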
diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go
new file mode 100644
index 0000000..e2c1b8b
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/ipnet.go
@@ -0,0 +1,98 @@
+package pflag
+
+import (
+	"fmt"
+	"net"
+	"strings"
+)
+
+// IPNet adapts net.IPNet for use as a flag.
+type ipNetValue net.IPNet
+
+func (ipnet ipNetValue) String() string {
+	n := net.IPNet(ipnet)
+	return n.String()
+}
+
+func (ipnet *ipNetValue) Set(value string) error {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(value))
+	if err != nil {
+		return err
+	}
+	*ipnet = ipNetValue(*n)
+	return nil
+}
+
+func (*ipNetValue) Type() string {
+	return "ipNet"
+}
+
+func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue {
+	*p = val
+	return (*ipNetValue)(p)
+}
+
+func ipNetConv(sval string) (interface{}, error) {
+	_, n, err := net.ParseCIDR(strings.TrimSpace(sval))
+	if err == nil {
+		return *n, nil
+	}
+	return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval)
+}
+
+// GetIPNet returns the net.IPNet value of a flag with the given name
+func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) {
+	val, err := f.getFlagType(name, "ipNet", ipNetConv)
+	if err != nil {
+		return net.IPNet{}, err
+	}
+	return val.(net.IPNet), nil
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	f.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNetVar defines a net.IPNet flag with specified name, default value, and usage string.
+// The argument p points to a net.IPNet variable in which to store the value of the flag.
+func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, "", usage)
+}
+
+// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash.
+func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) {
+	CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage)
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, "", value, usage)
+	return p
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	p := new(net.IPNet)
+	f.IPNetVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// IPNet defines a net.IPNet flag with specified name, default value, and usage string.
+// The return value is the address of a net.IPNet variable that stores the value of the flag.
+func IPNet(name string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, "", value, usage)
+}
+
+// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash.
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet {
+	return CommandLine.IPNetP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go
new file mode 100644
index 0000000..04e0a26
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string.go
@@ -0,0 +1,80 @@
+package pflag
+
+// -- string Value
+type stringValue string
+
+func newStringValue(val string, p *string) *stringValue {
+	*p = val
+	return (*stringValue)(p)
+}
+
+func (s *stringValue) Set(val string) error {
+	*s = stringValue(val)
+	return nil
+}
+func (s *stringValue) Type() string {
+	return "string"
+}
+
+func (s *stringValue) String() string { return string(*s) }
+
+func stringConv(sval string) (interface{}, error) {
+	return sval, nil
+}
+
+// GetString returns the string value of a flag with the given name
+func (f *FlagSet) GetString(name string) (string, error) {
+	val, err := f.getFlagType(name, "string", stringConv)
+	if err != nil {
+		return "", err
+	}
+	return val.(string), nil
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
+	f.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// StringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a string variable in which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, "", usage)
+}
+
+// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringVarP(p *string, name, shorthand string, value string, usage string) {
+	CommandLine.VarP(newStringValue(value, p), name, shorthand, usage)
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func (f *FlagSet) String(name string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, "", value, usage)
+	return p
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
+	p := new(string)
+	f.StringVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// String defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a string variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+	return CommandLine.StringP(name, "", value, usage)
+}
+
+// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
+func StringP(name, shorthand string, value string, usage string) *string {
+	return CommandLine.StringP(name, shorthand, value, usage)
+}
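
Every value type in this package follows the same pattern shown above: Var/VarP methods on a FlagSet plus package-level equivalents that register on CommandLine. A minimal sketch using the string flavour; the flag set and flag names are made up for illustration.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// FlagSet-scoped flag with a one-letter shorthand (example-only names).
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	name := fs.StringP("name", "n", "world", "who to greet")
	_ = fs.Parse([]string{"-n", "gopher"})
	fmt.Println("hello", *name) // hello gopher

	// The package-level form registers on flag.CommandLine instead,
	// which flag.Parse() fills from os.Args.
	_ = flag.String("greeting", "hello", "greeting to use")
}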
diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go
new file mode 100644
index 0000000..fa7bc60
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_array.go
@@ -0,0 +1,103 @@
+package pflag
+
+// -- stringArray Value
+type stringArrayValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringArrayValue(val []string, p *[]string) *stringArrayValue {
+	ssv := new(stringArrayValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func (s *stringArrayValue) Set(val string) error {
+	if !s.changed {
+		*s.value = []string{val}
+		s.changed = true
+	} else {
+		*s.value = append(*s.value, val)
+	}
+	return nil
+}
+
+func (s *stringArrayValue) Type() string {
+	return "stringArray"
+}
+
+func (s *stringArrayValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringArrayConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause an array with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringArray returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringArray(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringArray", stringArrayConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the values of the multiple flags.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArrayVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArrayVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, "", usage)
+}
+
+// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage)
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringArrayVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringArray defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// The value of each argument is not split on commas. Use a StringSlice for that.
+func StringArray(name string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, "", value, usage)
+}
+
+// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash.
+func StringArrayP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringArrayP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go
new file mode 100644
index 0000000..0cd3ccc
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_slice.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"strings"
+)
+
+// -- stringSlice Value
+type stringSliceValue struct {
+	value   *[]string
+	changed bool
+}
+
+func newStringSliceValue(val []string, p *[]string) *stringSliceValue {
+	ssv := new(stringSliceValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+func readAsCSV(val string) ([]string, error) {
+	if val == "" {
+		return []string{}, nil
+	}
+	stringReader := strings.NewReader(val)
+	csvReader := csv.NewReader(stringReader)
+	return csvReader.Read()
+}
+
+func writeAsCSV(vals []string) (string, error) {
+	b := &bytes.Buffer{}
+	w := csv.NewWriter(b)
+	err := w.Write(vals)
+	if err != nil {
+		return "", err
+	}
+	w.Flush()
+	return strings.TrimSuffix(b.String(), "\n"), nil
+}
+
+func (s *stringSliceValue) Set(val string) error {
+	v, err := readAsCSV(val)
+	if err != nil {
+		return err
+	}
+	if !s.changed {
+		*s.value = v
+	} else {
+		*s.value = append(*s.value, v...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringSliceValue) Type() string {
+	return "stringSlice"
+}
+
+func (s *stringSliceValue) String() string {
+	str, _ := writeAsCSV(*s.value)
+	return "[" + str + "]"
+}
+
+func stringSliceConv(sval string) (interface{}, error) {
+	sval = sval[1 : len(sval)-1]
+	// An empty string would cause a slice with one (empty) string
+	if len(sval) == 0 {
+		return []string{}, nil
+	}
+	return readAsCSV(sval)
+}
+
+// GetStringSlice returns the []string value of a flag with the given name
+func (f *FlagSet) GetStringSlice(name string) ([]string, error) {
+	val, err := f.getFlagType(name, "stringSlice", stringSliceConv)
+	if err != nil {
+		return []string{}, err
+	}
+	return val.([]string), nil
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" -ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	f.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSliceVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a []string variable in which to store the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" -ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSliceVar(p *[]string, name string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, "", usage)
+}
+
+// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) {
+	CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage)
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" -ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	p := []string{}
+	f.StringSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringSlice defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a []string variable that stores the value of the flag.
+// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly.
+// For example:
+//   --ss="v1,v2" -ss="v3"
+// will result in
+//   []string{"v1", "v2", "v3"}
+func StringSlice(name string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, "", value, usage)
+}
+
+// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash.
+func StringSliceP(name, shorthand string, value []string, usage string) *[]string {
+	return CommandLine.StringSliceP(name, shorthand, value, usage)
+}
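
The difference between the stringSlice value above and the stringArray value defined earlier lies only in how arguments are split. A small comparative sketch; the flag set and flag names are illustrative assumptions.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Example-only flag set and flag names.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	// stringSlice splits each argument on commas via the CSV reader.
	slice := fs.StringSlice("label", nil, "labels (comma-separated, repeatable)")
	// stringArray keeps each argument verbatim.
	array := fs.StringArray("arg", nil, "raw arguments (repeatable)")
	_ = fs.Parse([]string{"--label=a,b", "--label=c", "--arg=a,b", "--arg=c"})
	fmt.Println(*slice) // [a b c]
	fmt.Println(*array) // [a,b c]
}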
diff --git a/vendor/github.com/spf13/pflag/string_to_int.go b/vendor/github.com/spf13/pflag/string_to_int.go
new file mode 100644
index 0000000..5ceda39
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_int.go
@@ -0,0 +1,149 @@
+package pflag
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- stringToInt Value
+type stringToIntValue struct {
+	value   *map[string]int
+	changed bool
+}
+
+func newStringToIntValue(val map[string]int, p *map[string]int) *stringToIntValue {
+	ssv := new(stringToIntValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToIntValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make(map[string]int, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.Atoi(kv[1])
+		if err != nil {
+			return err
+		}
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		for k, v := range out {
+			(*s.value)[k] = v
+		}
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringToIntValue) Type() string {
+	return "stringToInt"
+}
+
+func (s *stringToIntValue) String() string {
+	var buf bytes.Buffer
+	i := 0
+	for k, v := range *s.value {
+		if i > 0 {
+			buf.WriteRune(',')
+		}
+		buf.WriteString(k)
+		buf.WriteRune('=')
+		buf.WriteString(strconv.Itoa(v))
+		i++
+	}
+	return "[" + buf.String() + "]"
+}
+
+func stringToIntConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause an empty map
+	if len(val) == 0 {
+		return map[string]int{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make(map[string]int, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		var err error
+		out[kv[0]], err = strconv.Atoi(kv[1])
+		if err != nil {
+			return nil, err
+		}
+	}
+	return out, nil
+}
+
+// GetStringToInt returns the map[string]int value of a flag with the given name
+func (f *FlagSet) GetStringToInt(name string) (map[string]int, error) {
+	val, err := f.getFlagType(name, "stringToInt", stringToIntConv)
+	if err != nil {
+		return map[string]int{}, err
+	}
+	return val.(map[string]int), nil
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the values of the multiple flags.
+// Each argument is parsed as comma-separated key=value pairs
+func (f *FlagSet) StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+	f.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+	f.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToIntVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]int variable in which to store the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func StringToIntVar(p *map[string]int, name string, value map[string]int, usage string) {
+	CommandLine.VarP(newStringToIntValue(value, p), name, "", usage)
+}
+
+// StringToIntVarP is like StringToIntVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntVarP(p *map[string]int, name, shorthand string, value map[string]int, usage string) {
+	CommandLine.VarP(newStringToIntValue(value, p), name, shorthand, usage)
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func (f *FlagSet) StringToInt(name string, value map[string]int, usage string) *map[string]int {
+	p := map[string]int{}
+	f.StringToIntVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+	p := map[string]int{}
+	f.StringToIntVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringToInt defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]int variable that stores the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func StringToInt(name string, value map[string]int, usage string) *map[string]int {
+	return CommandLine.StringToIntP(name, "", value, usage)
+}
+
+// StringToIntP is like StringToInt, but accepts a shorthand letter that can be used after a single dash.
+func StringToIntP(name, shorthand string, value map[string]int, usage string) *map[string]int {
+	return CommandLine.StringToIntP(name, shorthand, value, usage)
+}
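
The stringToInt value above merges repeated flags into a single map, later pairs overriding earlier ones. A brief sketch; the flag set, flag name, and keys are illustrative only.

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Example-only flag set, flag name, and keys.
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	weights := fs.StringToInt("weight", map[string]int{}, "per-backend weights (key=value)")
	// Comma-separated pairs in one argument, plus a repeated flag that overrides "beta".
	_ = fs.Parse([]string{"--weight=alpha=1,beta=2", "--weight", "beta=5"})
	fmt.Println((*weights)["alpha"], (*weights)["beta"]) // 1 5
}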
diff --git a/vendor/github.com/spf13/pflag/string_to_string.go b/vendor/github.com/spf13/pflag/string_to_string.go
new file mode 100644
index 0000000..890a01a
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/string_to_string.go
@@ -0,0 +1,160 @@
+package pflag
+
+import (
+	"bytes"
+	"encoding/csv"
+	"fmt"
+	"strings"
+)
+
+// -- stringToString Value
+type stringToStringValue struct {
+	value   *map[string]string
+	changed bool
+}
+
+func newStringToStringValue(val map[string]string, p *map[string]string) *stringToStringValue {
+	ssv := new(stringToStringValue)
+	ssv.value = p
+	*ssv.value = val
+	return ssv
+}
+
+// Format: a=1,b=2
+func (s *stringToStringValue) Set(val string) error {
+	var ss []string
+	n := strings.Count(val, "=")
+	switch n {
+	case 0:
+		return fmt.Errorf("%s must be formatted as key=value", val)
+	case 1:
+		ss = append(ss, strings.Trim(val, `"`))
+	default:
+		r := csv.NewReader(strings.NewReader(val))
+		var err error
+		ss, err = r.Read()
+		if err != nil {
+			return err
+		}
+	}
+
+	out := make(map[string]string, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		out[kv[0]] = kv[1]
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		for k, v := range out {
+			(*s.value)[k] = v
+		}
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *stringToStringValue) Type() string {
+	return "stringToString"
+}
+
+func (s *stringToStringValue) String() string {
+	records := make([]string, 0, len(*s.value)>>1)
+	for k, v := range *s.value {
+		records = append(records, k+"="+v)
+	}
+
+	var buf bytes.Buffer
+	w := csv.NewWriter(&buf)
+	if err := w.Write(records); err != nil {
+		panic(err)
+	}
+	w.Flush()
+	return "[" + strings.TrimSpace(buf.String()) + "]"
+}
+
+func stringToStringConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// An empty string would cause an empty map
+	if len(val) == 0 {
+		return map[string]string{}, nil
+	}
+	r := csv.NewReader(strings.NewReader(val))
+	ss, err := r.Read()
+	if err != nil {
+		return nil, err
+	}
+	out := make(map[string]string, len(ss))
+	for _, pair := range ss {
+		kv := strings.SplitN(pair, "=", 2)
+		if len(kv) != 2 {
+			return nil, fmt.Errorf("%s must be formatted as key=value", pair)
+		}
+		out[kv[0]] = kv[1]
+	}
+	return out, nil
+}
+
+// GetStringToString returns the map[string]string value of a flag with the given name
+func (f *FlagSet) GetStringToString(name string) (map[string]string, error) {
+	val, err := f.getFlagType(name, "stringToString", stringToStringConv)
+	if err != nil {
+		return map[string]string{}, err
+	}
+	return val.(map[string]string), nil
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the values of the multiple flags.
+// Each argument is parsed as comma-separated key=value pairs
+func (f *FlagSet) StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+	f.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+	f.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToStringVar defines a string flag with specified name, default value, and usage string.
+// The argument p points to a map[string]string variable in which to store the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func StringToStringVar(p *map[string]string, name string, value map[string]string, usage string) {
+	CommandLine.VarP(newStringToStringValue(value, p), name, "", usage)
+}
+
+// StringToStringVarP is like StringToStringVar, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringVarP(p *map[string]string, name, shorthand string, value map[string]string, usage string) {
+	CommandLine.VarP(newStringToStringValue(value, p), name, shorthand, usage)
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func (f *FlagSet) StringToString(name string, value map[string]string, usage string) *map[string]string {
+	p := map[string]string{}
+	f.StringToStringVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+	p := map[string]string{}
+	f.StringToStringVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// StringToString defines a string flag with specified name, default value, and usage string.
+// The return value is the address of a map[string]string variable that stores the value of the flag.
+// Each argument is parsed as comma-separated key=value pairs
+func StringToString(name string, value map[string]string, usage string) *map[string]string {
+	return CommandLine.StringToStringP(name, "", value, usage)
+}
+
+// StringToStringP is like StringToString, but accepts a shorthand letter that can be used after a single dash.
+func StringToStringP(name, shorthand string, value map[string]string, usage string) *map[string]string {
+	return CommandLine.StringToStringP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go
new file mode 100644
index 0000000..dcbc2b7
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint Value
+type uintValue uint
+
+func newUintValue(val uint, p *uint) *uintValue {
+	*p = val
+	return (*uintValue)(p)
+}
+
+func (i *uintValue) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uintValue(v)
+	return err
+}
+
+func (i *uintValue) Type() string {
+	return "uint"
+}
+
+func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uintConv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 0)
+	if err != nil {
+		return 0, err
+	}
+	return uint(v), nil
+}
+
+// GetUint returns the uint value of a flag with the given name
+func (f *FlagSet) GetUint(name string) (uint, error) {
+	val, err := f.getFlagType(name, "uint", uintConv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint), nil
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	f.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and usage string.
+// The argument p points to a uint variable in which to store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, "", usage)
+}
+
+// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
+func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
+	CommandLine.VarP(newUintValue(value, p), name, shorthand, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, "", value, usage)
+	return p
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
+	p := new(uint)
+	f.UintVarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint defines a uint flag with specified name, default value, and usage string.
+// The return value is the address of a uint variable that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, "", value, usage)
+}
+
+// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
+func UintP(name, shorthand string, value uint, usage string) *uint {
+	return CommandLine.UintP(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go
new file mode 100644
index 0000000..7e9914e
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint16 value
+type uint16Value uint16
+
+func newUint16Value(val uint16, p *uint16) *uint16Value {
+	*p = val
+	return (*uint16Value)(p)
+}
+
+func (i *uint16Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 16)
+	*i = uint16Value(v)
+	return err
+}
+
+func (i *uint16Value) Type() string {
+	return "uint16"
+}
+
+func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint16Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 16)
+	if err != nil {
+		return 0, err
+	}
+	return uint16(v), nil
+}
+
+// GetUint16 returns the uint16 value of a flag with the given name
+func (f *FlagSet) GetUint16(name string) (uint16, error) {
+	val, err := f.getFlagType(name, "uint16", uint16Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint16), nil
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	f.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16Var defines a uint16 flag with specified name, default value, and usage string.
+// The argument p points to a uint16 variable in which to store the value of the flag.
+func Uint16Var(p *uint16, name string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, "", usage)
+}
+
+// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) {
+	CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage)
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	p := new(uint16)
+	f.Uint16VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint16 defines a uint16 flag with specified name, default value, and usage string.
+// The return value is the address of a uint16 variable that stores the value of the flag.
+func Uint16(name string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, "", value, usage)
+}
+
+// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash.
+func Uint16P(name, shorthand string, value uint16, usage string) *uint16 {
+	return CommandLine.Uint16P(name, shorthand, value, usage)
+}
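The sized variants (uint16 below, and uint32, uint64, uint8 in the following files) follow the same pattern; a small sketch using the shorthand form, where the flag name "port", the shorthand "p", and the default are illustrative assumptions:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Uint16P registers --port / -p; values outside 0..65535 fail to parse.
	port := flag.Uint16P("port", "p", 8080, "listen port")
	flag.Parse()
	fmt.Println(*port)
}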
diff --git a/vendor/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go
new file mode 100644
index 0000000..d802453
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint32.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint32 value
+type uint32Value uint32
+
+func newUint32Value(val uint32, p *uint32) *uint32Value {
+	*p = val
+	return (*uint32Value)(p)
+}
+
+func (i *uint32Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 32)
+	*i = uint32Value(v)
+	return err
+}
+
+func (i *uint32Value) Type() string {
+	return "uint32"
+}
+
+func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint32Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(v), nil
+}
+
+// GetUint32 returns the uint32 value of a flag with the given name
+func (f *FlagSet) GetUint32(name string) (uint32, error) {
+	val, err := f.getFlagType(name, "uint32", uint32Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint32), nil
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	f.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32Var defines a uint32 flag with specified name, default value, and usage string.
+// The argument p points to a uint32 variable in which to store the value of the flag.
+func Uint32Var(p *uint32, name string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, "", usage)
+}
+
+// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) {
+	CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage)
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	p := new(uint32)
+	f.Uint32VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint32 defines a uint32 flag with specified name, default value, and usage string.
+// The return value is the address of a uint32 variable that stores the value of the flag.
+func Uint32(name string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, "", value, usage)
+}
+
+// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash.
+func Uint32P(name, shorthand string, value uint32, usage string) *uint32 {
+	return CommandLine.Uint32P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go
new file mode 100644
index 0000000..f62240f
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint64.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint64 Value
+type uint64Value uint64
+
+func newUint64Value(val uint64, p *uint64) *uint64Value {
+	*p = val
+	return (*uint64Value)(p)
+}
+
+func (i *uint64Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 64)
+	*i = uint64Value(v)
+	return err
+}
+
+func (i *uint64Value) Type() string {
+	return "uint64"
+}
+
+func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint64Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 64)
+	if err != nil {
+		return 0, err
+	}
+	return uint64(v), nil
+}
+
+// GetUint64 returns the uint64 value of a flag with the given name
+func (f *FlagSet) GetUint64(name string) (uint64, error) {
+	val, err := f.getFlagType(name, "uint64", uint64Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint64), nil
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	f.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
+// The argument p points to a uint64 variable in which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, "", usage)
+}
+
+// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
+	CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	p := new(uint64)
+	f.Uint64VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint64 defines a uint64 flag with specified name, default value, and usage string.
+// The return value is the address of a uint64 variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, "", value, usage)
+}
+
+// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
+func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
+	return CommandLine.Uint64P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go
new file mode 100644
index 0000000..bb0e83c
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint8.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- uint8 Value
+type uint8Value uint8
+
+func newUint8Value(val uint8, p *uint8) *uint8Value {
+	*p = val
+	return (*uint8Value)(p)
+}
+
+func (i *uint8Value) Set(s string) error {
+	v, err := strconv.ParseUint(s, 0, 8)
+	*i = uint8Value(v)
+	return err
+}
+
+func (i *uint8Value) Type() string {
+	return "uint8"
+}
+
+func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) }
+
+func uint8Conv(sval string) (interface{}, error) {
+	v, err := strconv.ParseUint(sval, 0, 8)
+	if err != nil {
+		return 0, err
+	}
+	return uint8(v), nil
+}
+
+// GetUint8 returns the uint8 value of a flag with the given name
+func (f *FlagSet) GetUint8(name string) (uint8, error) {
+	val, err := f.getFlagType(name, "uint8", uint8Conv)
+	if err != nil {
+		return 0, err
+	}
+	return val.(uint8), nil
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	f.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8Var defines a uint8 flag with specified name, default value, and usage string.
+// The argument p points to a uint8 variable in which to store the value of the flag.
+func Uint8Var(p *uint8, name string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, "", usage)
+}
+
+// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash.
+func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) {
+	CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage)
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, "", value, usage)
+	return p
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	p := new(uint8)
+	f.Uint8VarP(p, name, shorthand, value, usage)
+	return p
+}
+
+// Uint8 defines a uint8 flag with specified name, default value, and usage string.
+// The return value is the address of a uint8 variable that stores the value of the flag.
+func Uint8(name string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, "", value, usage)
+}
+
+// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash.
+func Uint8P(name, shorthand string, value uint8, usage string) *uint8 {
+	return CommandLine.Uint8P(name, shorthand, value, usage)
+}
diff --git a/vendor/github.com/spf13/pflag/uint_slice.go b/vendor/github.com/spf13/pflag/uint_slice.go
new file mode 100644
index 0000000..edd94c6
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/uint_slice.go
@@ -0,0 +1,126 @@
+package pflag
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// -- uintSlice Value
+type uintSliceValue struct {
+	value   *[]uint
+	changed bool
+}
+
+func newUintSliceValue(val []uint, p *[]uint) *uintSliceValue {
+	uisv := new(uintSliceValue)
+	uisv.value = p
+	*uisv.value = val
+	return uisv
+}
+
+func (s *uintSliceValue) Set(val string) error {
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return err
+		}
+		out[i] = uint(u)
+	}
+	if !s.changed {
+		*s.value = out
+	} else {
+		*s.value = append(*s.value, out...)
+	}
+	s.changed = true
+	return nil
+}
+
+func (s *uintSliceValue) Type() string {
+	return "uintSlice"
+}
+
+func (s *uintSliceValue) String() string {
+	out := make([]string, len(*s.value))
+	for i, d := range *s.value {
+		out[i] = fmt.Sprintf("%d", d)
+	}
+	return "[" + strings.Join(out, ",") + "]"
+}
+
+func uintSliceConv(val string) (interface{}, error) {
+	val = strings.Trim(val, "[]")
+	// Empty string would cause a slice with one (empty) entry
+	if len(val) == 0 {
+		return []uint{}, nil
+	}
+	ss := strings.Split(val, ",")
+	out := make([]uint, len(ss))
+	for i, d := range ss {
+		u, err := strconv.ParseUint(d, 10, 0)
+		if err != nil {
+			return nil, err
+		}
+		out[i] = uint(u)
+	}
+	return out, nil
+}
+
+// GetUintSlice returns the []uint value of a flag with the given name.
+func (f *FlagSet) GetUintSlice(name string) ([]uint, error) {
+	val, err := f.getFlagType(name, "uintSlice", uintSliceConv)
+	if err != nil {
+		return []uint{}, err
+	}
+	return val.([]uint), nil
+}
+
+// UintSliceVar defines a uintSlice flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func (f *FlagSet) UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	f.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSliceVar defines a []uint flag with specified name, default value, and usage string.
+// The argument p points to a []uint variable in which to store the value of the flag.
+func UintSliceVar(p *[]uint, name string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, "", usage)
+}
+
+// UintSliceVarP is like UintSliceVar, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceVarP(p *[]uint, name, shorthand string, value []uint, usage string) {
+	CommandLine.VarP(newUintSliceValue(value, p), name, shorthand, usage)
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func (f *FlagSet) UintSlice(name string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, "", value, usage)
+	return &p
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	p := []uint{}
+	f.UintSliceVarP(&p, name, shorthand, value, usage)
+	return &p
+}
+
+// UintSlice defines a []uint flag with specified name, default value, and usage string.
+// The return value is the address of a []uint variable that stores the value of the flag.
+func UintSlice(name string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, "", value, usage)
+}
+
+// UintSliceP is like UintSlice, but accepts a shorthand letter that can be used after a single dash.
+func UintSliceP(name, shorthand string, value []uint, usage string) *[]uint {
+	return CommandLine.UintSliceP(name, shorthand, value, usage)
+}
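A short sketch of the uintSlice semantics defined above: values are comma-separated, the first occurrence of the flag replaces the default, and later occurrences append (see the changed bookkeeping in Set). The flag name "ids" and the default are illustrative assumptions:

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// UintSlice registers --ids with a default of [1 2].
	ids := flag.UintSlice("ids", []uint{1, 2}, "numeric ids")

	// e.g. running with --ids=3,4 --ids=5 yields [3 4 5]:
	// the first --ids replaces the default, the second appends.
	flag.Parse()
	fmt.Println(*ids)
}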