[VOL-4638] Initial commit
Change-Id: I5e785d017e1d27783b24591b52f20f442895e602
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, is not compiled by
+// GopherJS, and "-tags safe" is not added to the go build command line. The
+// "disableunsafe" tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more
+// complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = false
+
+ // ptrSize is the size of a pointer on the current arch.
+ ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces, which are used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
+ }
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
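+
+// A minimal usage sketch of unsafeReflectValue (the wrapper type and
+// variables below are illustrative, not part of this package):
+//
+//	type wrapper struct{ s fmt.Stringer }
+//	field := reflect.ValueOf(wrapper{s: someStringer}).Field(0)
+//	// field.CanInterface() reports false: the field is unexported.
+//	field = unsafeReflectValue(field)
+//	str := field.Interface().(fmt.Stringer).String()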
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+		// a will have flagStickyRO set.
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
+ }
+ panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, is compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+ // UnsafeDisabled is a build-time constant which specifies whether or
+ // not access to the unsafe package is available.
+ UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "sort"
+ "strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+ panicBytes = []byte("(PANIC=")
+ plusBytes = []byte("+")
+ iBytes = []byte("i")
+ trueBytes = []byte("true")
+ falseBytes = []byte("false")
+ interfaceBytes = []byte("(interface {})")
+ commaNewlineBytes = []byte(",\n")
+ newlineBytes = []byte("\n")
+ openBraceBytes = []byte("{")
+ openBraceNewlineBytes = []byte("{\n")
+ closeBraceBytes = []byte("}")
+ asteriskBytes = []byte("*")
+ colonBytes = []byte(":")
+ colonSpaceBytes = []byte(": ")
+ openParenBytes = []byte("(")
+ closeParenBytes = []byte(")")
+ spaceBytes = []byte(" ")
+ pointerChainBytes = []byte("->")
+ nilAngleBytes = []byte("<nil>")
+ maxNewlineBytes = []byte("<max depth reached>\n")
+ maxShortBytes = []byte("<max>")
+ circularBytes = []byte("<already shown>")
+ circularShortBytes = []byte("<shown>")
+ invalidAngleBytes = []byte("<invalid>")
+ openBracketBytes = []byte("[")
+ closeBracketBytes = []byte("]")
+ percentBytes = []byte("%")
+ precisionBytes = []byte(".")
+ openAngleBytes = []byte("<")
+ closeAngleBytes = []byte(">")
+ openMapBytes = []byte("map[")
+ closeMapBytes = []byte("]")
+ lenEqualsBytes = []byte("len=")
+ capEqualsBytes = []byte("cap=")
+)
+
+// hexDigits is used to map a value in the range 0-15 to its hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
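+// For example, if a value's String or Error method panics, the output will
+// contain "(PANIC=" followed by the panic value and a closing parenthesis in
+// place of the normal formatted result.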
+func catchPanic(w io.Writer, v reflect.Value) {
+ if err := recover(); err != nil {
+ w.Write(panicBytes)
+ fmt.Fprintf(w, "%v", err)
+ w.Write(closeParenBytes)
+ }
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+ // We need an interface to check if the type implements the error or
+ // Stringer interface. However, the reflect package won't give us an
+ // interface on certain things like unexported struct fields in order
+ // to enforce visibility rules. We use unsafe, when it's available,
+ // to bypass these restrictions since this package does not mutate the
+ // values.
+ if !v.CanInterface() {
+ if UnsafeDisabled {
+ return false
+ }
+
+ v = unsafeReflectValue(v)
+ }
+
+ // Choose whether or not to do error and Stringer interface lookups against
+ // the base type or a pointer to the base type depending on settings.
+	// Technically, calling one of these methods with a pointer receiver can
+	// mutate the value; however, types which choose to satisfy an error or
+ // Stringer interface with a pointer receiver should not be mutating their
+ // state inside these interface methods.
+ if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+ v = unsafeReflectValue(v)
+ }
+ if v.CanAddr() {
+ v = v.Addr()
+ }
+
+ // Is it an error or Stringer?
+ switch iface := v.Interface().(type) {
+ case error:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.Error()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+
+ w.Write([]byte(iface.Error()))
+ return true
+
+ case fmt.Stringer:
+ defer catchPanic(w, v)
+ if cs.ContinueOnMethod {
+ w.Write(openParenBytes)
+ w.Write([]byte(iface.String()))
+ w.Write(closeParenBytes)
+ w.Write(spaceBytes)
+ return false
+ }
+ w.Write([]byte(iface.String()))
+ return true
+ }
+ return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+ if val {
+ w.Write(trueBytes)
+ } else {
+ w.Write(falseBytes)
+ }
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+ w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+ w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64 bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+ w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+ r := real(c)
+ w.Write(openParenBytes)
+ w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+ i := imag(c)
+ if i >= 0 {
+ w.Write(plusBytes)
+ }
+ w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+ w.Write(iBytes)
+ w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+ // Null pointer.
+ num := uint64(p)
+ if num == 0 {
+ w.Write(nilAngleBytes)
+ return
+ }
+
+	// Max uint64 is 16 hex digits + 2 bytes for the '0x' prefix.
+ buf := make([]byte, 18)
+
+ // It's simpler to construct the hex string right to left.
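+	// For example, p == 0x1f2 fills buf from the end with '2', then 'f',
+	// then '1'; the "0x" prefix is written just before the digits.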
+ base := uint64(16)
+ i := len(buf) - 1
+ for num >= base {
+ buf[i] = hexDigits[num%base]
+ num /= base
+ i--
+ }
+ buf[i] = hexDigits[num]
+
+ // Add '0x' prefix.
+ i--
+ buf[i] = 'x'
+ i--
+ buf[i] = '0'
+
+ // Strip unused leading bytes.
+ buf = buf[i:]
+ w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+ values []reflect.Value
+	strings []string // either nil or same len as values
+ cs *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted. It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
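+// For example, when cs.SortKeys and cs.SpewKeys are both set, map keys of a
+// struct type (which have no simple ordering and, say, no Stringer) end up
+// sorted by their spewed "%#v" string representations.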
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+ vs := &valuesSorter{values: values, cs: cs}
+ if canSortSimply(vs.values[0].Kind()) {
+ return vs
+ }
+ if !cs.DisableMethods {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ b := bytes.Buffer{}
+ if !handleMethods(cs, &b, vs.values[i]) {
+ vs.strings = nil
+ break
+ }
+ vs.strings[i] = b.String()
+ }
+ }
+ if vs.strings == nil && cs.SpewKeys {
+ vs.strings = make([]string, len(values))
+ for i := range vs.values {
+ vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+ }
+ }
+ return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+ // This switch parallels valueSortLess, except for the default case.
+ switch kind {
+ case reflect.Bool:
+ return true
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return true
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Uintptr:
+ return true
+ case reflect.Array:
+ return true
+ }
+ return false
+}
+
+// Len returns the number of values in the slice. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+ return len(s.values)
+}
+
+// Swap swaps the values at the passed indices. It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+ s.values[i], s.values[j] = s.values[j], s.values[i]
+ if s.strings != nil {
+ s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+ }
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value. It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ return a.Int() < b.Int()
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ return a.Uint() < b.Uint()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.String:
+ return a.String() < b.String()
+ case reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Array:
+ // Compare the contents of both arrays.
+ l := a.Len()
+ for i := 0; i < l; i++ {
+ av := a.Index(i)
+ bv := b.Index(i)
+ if av.Interface() == bv.Interface() {
+ continue
+ }
+ return valueSortLess(av, bv)
+ }
+ }
+ return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j. It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+ if s.strings == nil {
+ return valueSortLess(s.values[i], s.values[j])
+ }
+ return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer. Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+ if len(values) == 0 {
+ return
+ }
+ sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values. There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality. Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation. You would typically
+// want to set the Indent field to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings. See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+ // Indent specifies the string to use for each indentation level. The
+	// global config instance that all top-level functions use sets this to a
+ // single space by default. If you would like more indentation, you might
+ // set this to a tab with "\t" or perhaps two spaces with " ".
+ Indent string
+
+ // MaxDepth controls the maximum number of levels to descend into nested
+ // data structures. The default, 0, means there is no limit.
+ //
+ // NOTE: Circular data structures are properly detected, so it is not
+ // necessary to set this value unless you specifically want to limit deeply
+ // nested data structures.
+ MaxDepth int
+
+ // DisableMethods specifies whether or not error and Stringer interfaces are
+ // invoked for types that implement them.
+ DisableMethods bool
+
+ // DisablePointerMethods specifies whether or not to check for and invoke
+ // error and Stringer interfaces on types which only accept a pointer
+ // receiver when the current type is not a pointer.
+ //
+ // NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value; however,
+	// in practice, types which choose to satisfy an error or Stringer
+ // interface with a pointer receiver should not be mutating their state
+ // inside these interface methods. As a result, this option relies on
+ // access to the unsafe package, so it will not have any effect when
+ // running in environments without access to the unsafe package such as
+ // Google App Engine or with the "safe" build tag specified.
+ DisablePointerMethods bool
+
+ // DisablePointerAddresses specifies whether to disable the printing of
+ // pointer addresses. This is useful when diffing data structures in tests.
+ DisablePointerAddresses bool
+
+ // DisableCapacities specifies whether to disable the printing of capacities
+ // for arrays, slices, maps and channels. This is useful when diffing
+ // data structures in tests.
+ DisableCapacities bool
+
+ // ContinueOnMethod specifies whether or not recursion should continue once
+ // a custom error or Stringer interface is invoked. The default, false,
+ // means it will print the results of invoking the custom error or Stringer
+ // interface and return immediately instead of continuing to recurse into
+ // the internals of the data type.
+ //
+ // NOTE: This flag does not have any effect if method invocation is disabled
+ // via the DisableMethods or DisablePointerMethods options.
+ ContinueOnMethod bool
+
+	// SortKeys specifies that map keys should be sorted before being printed.
+	// Use this to have a more deterministic, diffable output. Note that only
+ // this to have a more deterministic, diffable output. Note that only
+ // native types (bool, int, uint, floats, uintptr and string) and types
+ // that support the error or Stringer interfaces (if methods are
+ // enabled) are supported, with other types sorted according to the
+ // reflect.Value.String() output which guarantees display stability.
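+	//
+	// A short sketch of the effect (values are illustrative):
+	//
+	//	cs := spew.ConfigState{Indent: " ", SortKeys: true}
+	//	cs.Dump(map[string]int{"b": 2, "a": 1}) // key "a" prints before "b"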
+ SortKeys bool
+
+ // SpewKeys specifies that, as a last resort attempt, map keys should
+ // be spewed to strings and sorted by those strings. This is only
+ // considered if SortKeys is true.
+ SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the formatted string as a value that satisfies error. See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+ return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Sprintf.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+ fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+ fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(c, &buf, a...)
+ return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with c.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = newFormatter(c, arg)
+ }
+ return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// Indent: " "
+// MaxDepth: 0
+// DisableMethods: false
+// DisablePointerMethods: false
+// ContinueOnMethod: false
+// SortKeys: false
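+//
+// A typical usage sketch (the MaxDepth tweak is illustrative):
+//
+//	cs := spew.NewDefaultConfig()
+//	cs.MaxDepth = 2
+//	cs.Dump(myVar)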
+func NewDefaultConfig() *ConfigState {
+ return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compact inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations
+to be used concurrently. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels. This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+		Specifies that map keys should be sorted before being printed.
+		Use this to have a more deterministic, diffable output. Note that
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed in hexdump -C fashion, as
+shown below.
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+ %v: <**>5
+ %+v: <**>(0xf8400420d0->0xf8400420c8)5
+ %#v: (**uint8)5
+ %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+ %v: <*>{1 <*><shown>}
+ %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+ %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+ %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output. Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+var (
+ // uint8Type is a reflect.Type representing a uint8. It is used to
+ // convert cgo types to uint8 slices for hexdumping.
+ uint8Type = reflect.TypeOf(uint8(0))
+
+ // cCharRE is a regular expression that matches a cgo char.
+ // It is used to detect character arrays to hexdump them.
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+ // cUnsignedCharRE is a regular expression that matches a cgo unsigned
+ // char. It is used to detect unsigned character arrays to hexdump
+ // them.
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+ // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+ // It is used to detect uint8_t arrays to hexdump them.
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
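+
+// For example, the element type of a cgo char array renders under reflection
+// as something like "main._Ctype_char", which cCharRE matches.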
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+ w io.Writer
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ ignoreNextIndent bool
+ cs *ConfigState
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+ if d.ignoreNextIndent {
+ d.ignoreNextIndent = false
+ return
+ }
+ d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
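+// For example, a reflect.Value holding interface{}(42) unpacks to the
+// reflect.Value for the concrete int 42.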
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ v = v.Elem()
+ }
+ return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from the map used to detect
+ // circular refs.
+ for k, depth := range d.pointers {
+ if depth >= d.depth {
+ delete(d.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ d.pointers[addr] = d.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type information.
+ d.w.Write(openParenBytes)
+ d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+ d.w.Write([]byte(ve.Type().String()))
+ d.w.Write(closeParenBytes)
+
+ // Display pointer information.
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+ d.w.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ d.w.Write(pointerChainBytes)
+ }
+ printHexPtr(d.w, addr)
+ }
+ d.w.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ d.w.Write(openParenBytes)
+ switch {
+ case nilFound:
+ d.w.Write(nilAngleBytes)
+
+ case cycleFound:
+ d.w.Write(circularBytes)
+
+ default:
+ d.ignoreNextType = true
+ d.dump(ve)
+ }
+ d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+ // Determine whether this type should be hex dumped or not. Also,
+ // for types which should be hexdumped, try to use the underlying data
+ // first, then fall back to trying to convert them to a uint8 slice.
+ var buf []uint8
+ doConvert := false
+ doHexDump := false
+ numEntries := v.Len()
+ if numEntries > 0 {
+ vt := v.Index(0).Type()
+ vts := vt.String()
+ switch {
+ // C types that need to be converted.
+ case cCharRE.MatchString(vts):
+ fallthrough
+ case cUnsignedCharRE.MatchString(vts):
+ fallthrough
+ case cUint8tCharRE.MatchString(vts):
+ doConvert = true
+
+ // Try to use existing uint8 slices and fall back to converting
+ // and copying if that fails.
+ case vt.Kind() == reflect.Uint8:
+ // We need an addressable interface to convert the type
+ // to a byte slice. However, the reflect package won't
+ // give us an interface on certain things like
+ // unexported struct fields in order to enforce
+ // visibility rules. We use unsafe, when available, to
+ // bypass these restrictions since this package does not
+ // mutate the values.
+ vs := v
+ if !vs.CanInterface() || !vs.CanAddr() {
+ vs = unsafeReflectValue(vs)
+ }
+ if !UnsafeDisabled {
+ vs = vs.Slice(0, numEntries)
+
+ // Use the existing uint8 slice if it can be
+ // type asserted.
+ iface := vs.Interface()
+ if slice, ok := iface.([]uint8); ok {
+ buf = slice
+ doHexDump = true
+ break
+ }
+ }
+
+ // The underlying data needs to be converted if it can't
+ // be type asserted to a uint8 slice.
+ doConvert = true
+ }
+
+ // Copy and convert the underlying type if needed.
+ if doConvert && vt.ConvertibleTo(uint8Type) {
+ // Convert and copy each element into a uint8 byte
+ // slice.
+ buf = make([]uint8, numEntries)
+ for i := 0; i < numEntries; i++ {
+ vv := v.Index(i)
+ buf[i] = uint8(vv.Convert(uint8Type).Uint())
+ }
+ doHexDump = true
+ }
+ }
+
+ // Hexdump the entire slice as needed.
+ if doHexDump {
+ indent := strings.Repeat(d.cs.Indent, d.depth)
+ str := indent + hex.Dump(buf)
+ str = strings.Replace(str, "\n", "\n"+indent, -1)
+ str = strings.TrimRight(str, d.cs.Indent)
+ d.w.Write([]byte(str))
+ return
+ }
+
+ // Recursively call dump for each item.
+ for i := 0; i < numEntries; i++ {
+ d.dump(d.unpackValue(v.Index(i)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+}
+
+// dump is the main workhorse for dumping a value. It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately. It is a recursive function; however, circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ d.w.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ d.indent()
+ d.dumpPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !d.ignoreNextType {
+ d.indent()
+ d.w.Write(openParenBytes)
+ d.w.Write([]byte(v.Type().String()))
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+ d.ignoreNextType = false
+
+ // Display length and capacity if the built-in len and cap functions
+ // work with the value's kind and the len/cap itself is non-zero.
+ valueLen, valueCap := 0, 0
+ switch v.Kind() {
+ case reflect.Array, reflect.Slice, reflect.Chan:
+ valueLen, valueCap = v.Len(), v.Cap()
+ case reflect.Map, reflect.String:
+ valueLen = v.Len()
+ }
+ if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+ d.w.Write(openParenBytes)
+ if valueLen != 0 {
+ d.w.Write(lenEqualsBytes)
+ printInt(d.w, int64(valueLen), 10)
+ }
+ if !d.cs.DisableCapacities && valueCap != 0 {
+ if valueLen != 0 {
+ d.w.Write(spaceBytes)
+ }
+ d.w.Write(capEqualsBytes)
+ printInt(d.w, int64(valueCap), 10)
+ }
+ d.w.Write(closeParenBytes)
+ d.w.Write(spaceBytes)
+ }
+
+ // Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled.
+ if !d.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(d.cs, d.w, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(d.w, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(d.w, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(d.w, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(d.w, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(d.w, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(d.w, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(d.w, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ d.dumpSlice(v)
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.String:
+ d.w.Write([]byte(strconv.Quote(v.String())))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+		// nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ numEntries := v.Len()
+ keys := v.MapKeys()
+ if d.cs.SortKeys {
+ sortValues(keys, d.cs)
+ }
+ for i, key := range keys {
+ d.dump(d.unpackValue(key))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.MapIndex(key)))
+ if i < (numEntries - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Struct:
+ d.w.Write(openBraceNewlineBytes)
+ d.depth++
+ if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+ d.indent()
+ d.w.Write(maxNewlineBytes)
+ } else {
+ vt := v.Type()
+ numFields := v.NumField()
+ for i := 0; i < numFields; i++ {
+ d.indent()
+ vtf := vt.Field(i)
+ d.w.Write([]byte(vtf.Name))
+ d.w.Write(colonSpaceBytes)
+ d.ignoreNextIndent = true
+ d.dump(d.unpackValue(v.Field(i)))
+ if i < (numFields - 1) {
+ d.w.Write(commaNewlineBytes)
+ } else {
+ d.w.Write(newlineBytes)
+ }
+ }
+ }
+ d.depth--
+ d.indent()
+ d.w.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(d.w, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(d.w, v.Pointer())
+
+	// There were no other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it in case any new
+ // types are added.
+ default:
+ if v.CanInterface() {
+ fmt.Fprintf(d.w, "%v", v.Interface())
+ } else {
+ fmt.Fprintf(d.w, "%v", v.String())
+ }
+ }
+}
+
+// fdump is a helper function to consolidate the logic from the various public
+// methods which take varying writers and config states.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+ for _, arg := range a {
+ if arg == nil {
+ w.Write(interfaceBytes)
+ w.Write(spaceBytes)
+ w.Write(nilAngleBytes)
+ w.Write(newlineBytes)
+ continue
+ }
+
+ d := dumpState{w: w, cs: cs}
+ d.pointers = make(map[uintptr]int)
+ d.dump(reflect.ValueOf(arg))
+ d.w.Write(newlineBytes)
+ }
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false
+ cycleFound := false
+ indirects := 0
+ ve := v
+ for ve.Kind() == reflect.Ptr {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ indirects++
+ addr := ve.Pointer()
+ pointerChain = append(pointerChain, addr)
+ if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+ cycleFound = true
+ indirects--
+ break
+ }
+ f.pointers[addr] = f.depth
+
+ ve = ve.Elem()
+ if ve.Kind() == reflect.Interface {
+ if ve.IsNil() {
+ nilFound = true
+ break
+ }
+ ve = ve.Elem()
+ }
+ }
+
+ // Display type or indirection level depending on flags.
+ if showTypes && !f.ignoreNextType {
+ f.fs.Write(openParenBytes)
+ f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+ f.fs.Write([]byte(ve.Type().String()))
+ f.fs.Write(closeParenBytes)
+ } else {
+ if nilFound || cycleFound {
+ indirects += strings.Count(ve.Type().String(), "*")
+ }
+ f.fs.Write(openAngleBytes)
+ f.fs.Write([]byte(strings.Repeat("*", indirects)))
+ f.fs.Write(closeAngleBytes)
+ }
+
+ // Display pointer information depending on flags.
+ if f.fs.Flag('+') && (len(pointerChain) > 0) {
+ f.fs.Write(openParenBytes)
+ for i, addr := range pointerChain {
+ if i > 0 {
+ f.fs.Write(pointerChainBytes)
+ }
+ printHexPtr(f.fs, addr)
+ }
+ f.fs.Write(closeParenBytes)
+ }
+
+ // Display dereferenced value.
+ switch {
+ case nilFound:
+ f.fs.Write(nilAngleBytes)
+
+ case cycleFound:
+ f.fs.Write(circularShortBytes)
+
+ default:
+ f.ignoreNextType = true
+ f.format(ve)
+ }
+}
+
+// format is the main workhorse for providing the Formatter interface. It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately. It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+ // Handle invalid reflect values immediately.
+ kind := v.Kind()
+ if kind == reflect.Invalid {
+ f.fs.Write(invalidAngleBytes)
+ return
+ }
+
+ // Handle pointers specially.
+ if kind == reflect.Ptr {
+ f.formatPtr(v)
+ return
+ }
+
+ // Print type information unless already handled elsewhere.
+ if !f.ignoreNextType && f.fs.Flag('#') {
+ f.fs.Write(openParenBytes)
+ f.fs.Write([]byte(v.Type().String()))
+ f.fs.Write(closeParenBytes)
+ }
+ f.ignoreNextType = false
+
+ // Call Stringer/error interfaces if they exist and the handle methods
+ // flag is enabled.
+ if !f.cs.DisableMethods {
+ if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+ if handled := handleMethods(f.cs, f.fs, v); handled {
+ return
+ }
+ }
+ }
+
+ switch kind {
+ case reflect.Invalid:
+ // Do nothing. We should never get here since invalid has already
+ // been handled above.
+
+ case reflect.Bool:
+ printBool(f.fs, v.Bool())
+
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ printInt(f.fs, v.Int(), 10)
+
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ printUint(f.fs, v.Uint(), 10)
+
+ case reflect.Float32:
+ printFloat(f.fs, v.Float(), 32)
+
+ case reflect.Float64:
+ printFloat(f.fs, v.Float(), 64)
+
+ case reflect.Complex64:
+ printComplex(f.fs, v.Complex(), 32)
+
+ case reflect.Complex128:
+ printComplex(f.fs, v.Complex(), 64)
+
+ case reflect.Slice:
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+ fallthrough
+
+ case reflect.Array:
+ f.fs.Write(openBracketBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ numEntries := v.Len()
+ for i := 0; i < numEntries; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.Index(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBracketBytes)
+
+ case reflect.String:
+ f.fs.Write([]byte(v.String()))
+
+ case reflect.Interface:
+ // The only time we should get here is for nil interfaces due to
+ // unpackValue calls.
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ }
+
+ case reflect.Ptr:
+ // Do nothing. We should never get here since pointers have already
+ // been handled above.
+
+ case reflect.Map:
+		// nil maps should be indicated as different from empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
+ f.fs.Write(openMapBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ keys := v.MapKeys()
+ if f.cs.SortKeys {
+ sortValues(keys, f.cs)
+ }
+ for i, key := range keys {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ f.ignoreNextType = true
+ f.format(f.unpackValue(key))
+ f.fs.Write(colonBytes)
+ f.ignoreNextType = true
+ f.format(f.unpackValue(v.MapIndex(key)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeMapBytes)
+
+ case reflect.Struct:
+ numFields := v.NumField()
+ f.fs.Write(openBraceBytes)
+ f.depth++
+ if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+ f.fs.Write(maxShortBytes)
+ } else {
+ vt := v.Type()
+ for i := 0; i < numFields; i++ {
+ if i > 0 {
+ f.fs.Write(spaceBytes)
+ }
+ vtf := vt.Field(i)
+ if f.fs.Flag('+') || f.fs.Flag('#') {
+ f.fs.Write([]byte(vtf.Name))
+ f.fs.Write(colonBytes)
+ }
+ f.format(f.unpackValue(v.Field(i)))
+ }
+ }
+ f.depth--
+ f.fs.Write(closeBraceBytes)
+
+ case reflect.Uintptr:
+ printHexPtr(f.fs, uintptr(v.Uint()))
+
+ case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+ printHexPtr(f.fs, v.Pointer())
+
+ // There were not any other types at the time this code was written, but
+ // fall back to letting the default fmt package handle it if any get added.
+ default:
+ format := f.buildDefaultFormat()
+ if v.CanInterface() {
+ fmt.Fprintf(f.fs, format, v.Interface())
+ } else {
+ fmt.Fprintf(f.fs, format, v.String())
+ }
+ }
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+ f.fs = fs
+
+ // Use standard formatting for verbs that are not v.
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
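+
+For example, a minimal sketch (myVar is a hypothetical value):
+
+	fmt.Printf("myVar: %v\n", spew.NewFormatter(myVar))
+	// which the convenience wrappers reduce to:
+	spew.Printf("myVar: %v\n", myVar)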
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+ return fmt.Sprintf(format, convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+ return fmt.Sprintln(convertArgs(a)...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a default spew Formatter interface.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+ formatters = make([]interface{}, len(args))
+ for index, arg := range args {
+ formatters[index] = NewFormatter(arg)
+ }
+ return formatters
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
new file mode 100644
index 0000000..b0ce81b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+var logger CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
new file mode 100644
index 0000000..7b1a123
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (though its use should be avoided)
+// 5. Allow key-value pairs to be added to a logger(UpdateLogger) or all loggers (UpdateAllLoggers) at run time
+// 6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach). In the examples below, log refers to this log package.
+//
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file)
+// The log level can be changed and any number of default fields can be added as well. The log level specifies
+// the lowest log level that will be in the output while the fields will be automatically added to all log printouts.
+// However, because VOLTHA components re-initialize the log level of each registered package to the default initial
+// log level passed as a CLI argument, the log level passed in the RegisterPackage call effectively has no effect.
+//
+//	var logger log.CLogger
+//	func init() {
+//		var err error
+//		logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//		if err != nil {
+//			panic(err)
+//		}
+//	}
+//
+// 2. In the calling package, use any of the publicly available functions of local package-level logger instance created
+// in previous step. Here is an example to write an Info log with additional fields:
+//
+//	logger.Infow("An example", log.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use
+// a) SetLogLevel from inside your package or
+// b) SetPackageLogLevel from anywhere or
+// c) SetAllLogLevel from anywhere.
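+//
+// For example, an illustrative sketch (the package path shown is hypothetical):
+//
+//	_ = log.SetLogLevel(log.DebugLevel)                                     // caller's package
+//	log.SetPackageLogLevel("github.com/opencord/some/pkg", log.DebugLevel)  // a named package
+//	log.SetAllLogLevel(log.WarnLevel)                                       // all registered packages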
+//
+// The dynamic log level configuration feature also uses the SetPackageLogLevel method, based on triggers received
+// when configured log levels change.
+
+package log
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path"
+ "runtime"
+ "strings"
+
+ zp "go.uber.org/zap"
+ zc "go.uber.org/zap/zapcore"
+)
+
+type LogLevel int8
+
+const (
+ // DebugLevel logs a message at debug level
+ DebugLevel = LogLevel(iota)
+ // InfoLevel logs a message at info level
+ InfoLevel
+ // WarnLevel logs a message at warning level
+ WarnLevel
+ // ErrorLevel logs a message at error level
+ ErrorLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using the json format, mostly used for consumption by an automated logging system
+const JSON = "json"
+
+// CLogger is a context-aware logger presenting an abstract logging interface. Any logging implementation used
+// will need to abide by this interface
+type CLogger interface {
+ Debug(context.Context, ...interface{})
+ Debugln(context.Context, ...interface{})
+ Debugf(context.Context, string, ...interface{})
+ Debugw(context.Context, string, Fields)
+
+ Info(context.Context, ...interface{})
+ Infoln(context.Context, ...interface{})
+ Infof(context.Context, string, ...interface{})
+ Infow(context.Context, string, Fields)
+
+ Warn(context.Context, ...interface{})
+ Warnln(context.Context, ...interface{})
+ Warnf(context.Context, string, ...interface{})
+ Warnw(context.Context, string, Fields)
+
+ Error(context.Context, ...interface{})
+ Errorln(context.Context, ...interface{})
+ Errorf(context.Context, string, ...interface{})
+ Errorw(context.Context, string, Fields)
+
+ Fatal(context.Context, ...interface{})
+ Fatalln(context.Context, ...interface{})
+ Fatalf(context.Context, string, ...interface{})
+ Fatalw(context.Context, string, Fields)
+
+ With(Fields) CLogger
+
+ // The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+ //
+ Warning(context.Context, ...interface{})
+ Warningln(context.Context, ...interface{})
+ Warningf(context.Context, string, ...interface{})
+
+ // V reports whether verbosity level l is at least the requested verbose level.
+ V(l LogLevel) bool
+
+	// GetLogLevel returns the log level of this specific logger
+ GetLogLevel() LogLevel
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *clogger
+var cfg zp.Config
+
+var loggers map[string]*clogger
+var cfgs map[string]zp.Config
+
+type clogger struct {
+ log *zp.SugaredLogger
+ parent *zp.Logger
+ packageName string
+}
+
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
+ switch l {
+ case DebugLevel:
+ return zp.NewAtomicLevelAt(zc.DebugLevel)
+ case InfoLevel:
+ return zp.NewAtomicLevelAt(zc.InfoLevel)
+ case WarnLevel:
+ return zp.NewAtomicLevelAt(zc.WarnLevel)
+ case ErrorLevel:
+ return zp.NewAtomicLevelAt(zc.ErrorLevel)
+ case FatalLevel:
+ return zp.NewAtomicLevelAt(zc.FatalLevel)
+ }
+ return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func logLevelToLevel(l LogLevel) zc.Level {
+ switch l {
+ case DebugLevel:
+ return zc.DebugLevel
+ case InfoLevel:
+ return zc.InfoLevel
+ case WarnLevel:
+ return zc.WarnLevel
+ case ErrorLevel:
+ return zc.ErrorLevel
+ case FatalLevel:
+ return zc.FatalLevel
+ }
+ return zc.ErrorLevel
+}
+
+func levelToLogLevel(l zc.Level) LogLevel {
+ switch l {
+ case zc.DebugLevel:
+ return DebugLevel
+ case zc.InfoLevel:
+ return InfoLevel
+ case zc.WarnLevel:
+ return WarnLevel
+ case zc.ErrorLevel:
+ return ErrorLevel
+ case zc.FatalLevel:
+ return FatalLevel
+ }
+ return ErrorLevel
+}
+
+func StringToLogLevel(l string) (LogLevel, error) {
+ switch strings.ToUpper(l) {
+ case "DEBUG":
+ return DebugLevel, nil
+ case "INFO":
+ return InfoLevel, nil
+ case "WARN":
+ return WarnLevel, nil
+ case "ERROR":
+ return ErrorLevel, nil
+ case "FATAL":
+ return FatalLevel, nil
+ }
+ return 0, errors.New("Given LogLevel is invalid : " + l)
+}
+
+func LogLevelToString(l LogLevel) (string, error) {
+ switch l {
+ case DebugLevel:
+ return "DEBUG", nil
+ case InfoLevel:
+ return "INFO", nil
+ case WarnLevel:
+ return "WARN", nil
+ case ErrorLevel:
+ return "ERROR", nil
+ case FatalLevel:
+ return "FATAL", nil
+ }
+ return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+}
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
+ return zp.Config{
+ Level: logLevelToAtomicLevel(level),
+ Encoding: outputType,
+ Development: true,
+ OutputPaths: []string{"stdout"},
+ ErrorOutputPaths: []string{"stderr"},
+ InitialFields: defaultFields,
+ EncoderConfig: zc.EncoderConfig{
+ LevelKey: "level",
+ MessageKey: "msg",
+ TimeKey: "ts",
+ CallerKey: "caller",
+ StacktraceKey: "stacktrace",
+ LineEnding: zc.DefaultLineEnding,
+ EncodeLevel: zc.LowercaseLevelEncoder,
+ EncodeTime: zc.ISO8601TimeEncoder,
+ EncodeDuration: zc.SecondsDurationEncoder,
+ EncodeCaller: zc.ShortCallerEncoder,
+ },
+ }
+}
+
+func ConstructZapConfig(outputType string, level LogLevel, fields Fields) zp.Config {
+ return getDefaultConfig(outputType, level, fields)
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be used. This function
+// initializes the default logger (zap's SugaredLogger)
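+//
+// For example, an illustrative sketch (the field name and value are hypothetical):
+//
+//	logger, err := log.SetDefaultLogger(log.JSON, log.InfoLevel, log.Fields{"component": "my-component"})
+//	if err != nil {
+//		panic(err)
+//	}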
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (CLogger, error) {
+ // Build a custom config using zap
+ cfg = getDefaultConfig(outputType, level, defaultFields)
+
+ l, err := cfg.Build(zp.AddCallerSkip(1))
+ if err != nil {
+ return nil, err
+ }
+
+ defaultLogger = &clogger{
+ log: l.Sugar(),
+ parent: l,
+ }
+
+ return defaultLogger, nil
+}
+
+// RegisterPackage registers a package to the log map. Each package gets its own logger which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: RegisterPackage also returns a reference to the actual logger. If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// The pkgNames parameter should be used for testing only, as this function detects the caller's package.
+func RegisterPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (CLogger, error) {
+ if cfgs == nil {
+ cfgs = make(map[string]zp.Config)
+ }
+ if loggers == nil {
+ loggers = make(map[string]*clogger)
+ }
+
+ var pkgName string
+ for _, name := range pkgNames {
+ pkgName = name
+ break
+ }
+ if pkgName == "" {
+ pkgName, _, _, _ = getCallerInfo()
+ }
+
+ if _, exist := loggers[pkgName]; exist {
+ return loggers[pkgName], nil
+ }
+
+ cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+ l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
+ if err != nil {
+ return nil, err
+ }
+
+ loggers[pkgName] = &clogger{
+ log: l.Sugar(),
+ parent: l,
+ packageName: pkgName,
+ }
+ return loggers[pkgName], nil
+}
+
+// UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
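+//
+// For example, an illustrative sketch (the field name and value are hypothetical):
+//
+//	_ = log.UpdateAllLoggers(log.Fields{"instance-id": "rw-core-1"})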
+func UpdateAllLoggers(defaultFields Fields) error {
+ for pkgName, cfg := range cfgs {
+ for k, v := range defaultFields {
+ if cfg.InitialFields == nil {
+ cfg.InitialFields = make(map[string]interface{})
+ }
+ cfg.InitialFields[k] = v
+ }
+ l, err := cfg.Build(zp.AddCallerSkip(1))
+ if err != nil {
+ return err
+ }
+
+ // Update the existing zap logger instance
+ loggers[pkgName].log = l.Sugar()
+ loggers[pkgName].parent = l
+ }
+ return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+ i := 0
+ keys := make([]string, len(loggers))
+ for k := range loggers {
+ keys[i] = k
+ i++
+ }
+ return keys
+}
+
+// UpdateLogger updates the logger associated with a caller's package with supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
+ pkgName, _, _, _ := getCallerInfo()
+ if _, exist := loggers[pkgName]; !exist {
+ return fmt.Errorf("package-%s-not-registered", pkgName)
+ }
+
+ // Build a new logger
+ if _, exist := cfgs[pkgName]; !exist {
+ return fmt.Errorf("config-%s-not-registered", pkgName)
+ }
+
+ cfg := cfgs[pkgName]
+ for k, v := range defaultFields {
+ if cfg.InitialFields == nil {
+ cfg.InitialFields = make(map[string]interface{})
+ }
+ cfg.InitialFields[k] = v
+ }
+ l, err := cfg.Build(zp.AddCallerSkip(1))
+ if err != nil {
+ return err
+ }
+
+ // Update the existing zap logger instance
+ loggers[pkgName].log = l.Sugar()
+ loggers[pkgName].parent = l
+
+ return nil
+}
+
+func setLevel(cfg zp.Config, level LogLevel) {
+ switch level {
+ case DebugLevel:
+ cfg.Level.SetLevel(zc.DebugLevel)
+ case InfoLevel:
+ cfg.Level.SetLevel(zc.InfoLevel)
+ case WarnLevel:
+ cfg.Level.SetLevel(zc.WarnLevel)
+ case ErrorLevel:
+ cfg.Level.SetLevel(zc.ErrorLevel)
+ case FatalLevel:
+ cfg.Level.SetLevel(zc.FatalLevel)
+ default:
+ cfg.Level.SetLevel(zc.ErrorLevel)
+ }
+}
+
+// SetPackageLogLevel dynamically sets the log level of a given package to level. This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level LogLevel) {
+ // Get proper config
+ if cfg, ok := cfgs[packageName]; ok {
+ setLevel(cfg, level)
+ }
+}
+
+// SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level LogLevel) {
+ // Get proper config
+ for _, cfg := range cfgs {
+ setLevel(cfg, level)
+ }
+}
+
+// GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
+ var name string
+ if len(packageName) == 1 {
+ name = packageName[0]
+ } else {
+ name, _, _, _ = getCallerInfo()
+ }
+ if cfg, ok := cfgs[name]; ok {
+ return levelToLogLevel(cfg.Level.Level()), nil
+ }
+ return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+// GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() LogLevel {
+ return levelToLogLevel(cfg.Level.Level())
+}
+
+// SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level LogLevel) error {
+ pkgName, _, _, _ := getCallerInfo()
+ if _, exist := cfgs[pkgName]; !exist {
+ return fmt.Errorf("unregistered-package-%s", pkgName)
+ }
+ cfg := cfgs[pkgName]
+ setLevel(cfg, level)
+ return nil
+}
+
+// SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level LogLevel) {
+ setLevel(cfg, level)
+}
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
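+//
+// For example, an illustrative sketch:
+//
+//	defer func() { _ = log.CleanUp() }()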
+func CleanUp() error {
+ for _, logger := range loggers {
+ if logger != nil {
+ if logger.parent != nil {
+ if err := logger.parent.Sync(); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ if defaultLogger != nil {
+ if defaultLogger.parent != nil {
+ if err := defaultLogger.parent.Sync(); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+	// The caller of a log function is one stack frame above the last log.go frame in the stack, so first look
+	// for the last log.go filename and then grab the caller info one level higher.
+ maxLevel := 3
+ skiplevel := 3 // Level with the most empirical success to see the last log.go stack frame.
+ pc := make([]uintptr, maxLevel)
+ n := runtime.Callers(skiplevel, pc)
+ packageName := ""
+ funcName := ""
+ fileName := ""
+ var line int
+ if n == 0 {
+ return packageName, fileName, funcName, line
+ }
+ frames := runtime.CallersFrames(pc[:n])
+ var frame runtime.Frame
+ var foundFrame runtime.Frame
+ more := true
+ for more {
+ frame, more = frames.Next()
+ _, fileName = path.Split(frame.File)
+ if fileName != "log.go" {
+ foundFrame = frame // First frame after log.go in the frame stack
+ break
+ }
+ }
+ parts := strings.Split(foundFrame.Function, ".")
+ pl := len(parts)
+ if pl >= 2 {
+ funcName = parts[pl-1]
+ if parts[pl-2][0] == '(' {
+ packageName = strings.Join(parts[0:pl-2], ".")
+ } else {
+ packageName = strings.Join(parts[0:pl-1], ".")
+ }
+ }
+
+ if strings.HasSuffix(packageName, ".init") {
+ packageName = strings.TrimSuffix(packageName, ".init")
+ }
+
+ if strings.HasSuffix(fileName, ".go") {
+ fileName = strings.TrimSuffix(fileName, ".go")
+ }
+
+ return packageName, fileName, funcName, foundFrame.Line
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l clogger) With(keysAndValues Fields) CLogger {
+	// Preserve packageName so that GetLogLevel keeps working on the derived logger
+	return clogger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent, packageName: l.packageName}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l clogger) Debug(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
+ if l.V(DebugLevel) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+ }
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l clogger) Info(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
+ if l.V(InfoLevel) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+ }
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l clogger) Warn(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
+ if l.V(WarnLevel) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+ }
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l clogger) Error(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
+ if l.V(ErrorLevel) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+ }
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
+ if l.V(FatalLevel) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+ }
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l clogger) Warning(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger with a line feed (zap appends one by default).
+func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
+ l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l clogger) V(level LogLevel) bool {
+ return l.parent.Core().Enabled(logLevelToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l clogger) GetLogLevel() LogLevel {
+ return levelToLogLevel(cfgs[l.packageName].Level.Level())
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
new file mode 100644
index 0000000..82c3d7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// File contains utility functions to support Open Tracing in conjunction with
+// Enhanced Logging based on context propagation
+
+package log
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "github.com/opentracing/opentracing-go"
+ jtracing "github.com/uber/jaeger-client-go"
+ jcfg "github.com/uber/jaeger-client-go/config"
+ "io"
+ "os"
+ "strings"
+ "sync"
+)
+
+const (
+ RootSpanNameKey = "op-name"
+)
+
+// LogFeaturesManager holds the global settings governing the Log Correlation and Tracing features. These should
+// only be updated through the exposed public methods
+type LogFeaturesManager struct {
+ isTracePublishingEnabled bool
+ isLogCorrelationEnabled bool
+ componentName string // Name of component extracted from ENV variable
+ activeTraceAgentAddress string
+ lock sync.Mutex
+}
+
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+ return globalLFM
+}
+
+// ActiveTracerProxy is a wrapper to utilize the currently active Tracer instance. The middleware library being used
+// for generating Spans for gRPC API calls does not support dynamically setting the active Tracer, unlike the
+// SetGlobalTracer method provided by the OpenTracing API
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+ return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+ return opentracing.GlobalTracer().Extract(format, carrier)
+}
+
+// traceLogger is a Jaeger-compliant Logger instance to redirect logs to the default Logger
+type traceLogger struct {
+ logger *clogger
+}
+
+func (tl traceLogger) Error(msg string) {
+ tl.logger.Error(context.Background(), msg)
+}
+
+func (tl traceLogger) Infof(msg string, args ...interface{}) {
+ // Tracing logs should be performed only at Debug Verbosity
+ tl.logger.Debugf(context.Background(), msg, args...)
+}
+
+// traceCloser is a wrapper to handle the correct Closer call at the time of process termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+ currentActiveTracer := opentracing.GlobalTracer()
+ if currentActiveTracer != nil {
+ if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+ jTracer.Close()
+ }
+ }
+
+ return nil
+}
+
+// InitTracingAndLogCorrelation initializes a Jaeger-based Tracing client based on the initial status of Trace Publishing and Log Correlation
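+//
+// For example, an illustrative sketch (the agent address is hypothetical):
+//
+//	closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(true, "jaeger-agent:6831", true)
+//	if err == nil {
+//		defer log.TerminateTracing(closer)
+//	}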
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+ lfm.componentName = os.Getenv("COMPONENT_NAME")
+ if lfm.componentName == "" {
+		return nil, errors.New("Unable to retrieve Pod Component Name from Runtime env")
+ }
+
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ // Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+ if !tracePublishEnabled && !logCorrelationEnabled {
+ logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+ lfm.isTracePublishingEnabled = false
+ lfm.isLogCorrelationEnabled = false
+ opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+ return traceCloser{}, nil
+ }
+
+ tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+ if err != nil {
+ return nil, err
+ }
+
+ // Initialize variables representing Active Status
+ opentracing.SetGlobalTracer(tracer)
+ lfm.isTracePublishingEnabled = tracePublishEnabled
+ lfm.activeTraceAgentAddress = traceAgentAddress
+ lfm.isLogCorrelationEnabled = logCorrelationEnabled
+ return traceCloser{}, nil
+}
+
+// replaceActiveTracer replaces the active Tracer and gracefully closes the previous tracer
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+ currentActiveTracer := opentracing.GlobalTracer()
+ opentracing.SetGlobalTracer(tracer)
+
+ if currentActiveTracer != nil {
+ if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+ jTracer.Close()
+ }
+ }
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ if isEnabled == lfm.isLogCorrelationEnabled {
+ logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+ return
+ }
+
+ if isEnabled {
+ // Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+ // Continue using the earlier tracer instance in case of any error
+ if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+ tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+ return
+ }
+
+ lfm.replaceActiveTracer(tracer)
+ }
+
+ lfm.isLogCorrelationEnabled = true
+ logger.Info(context.Background(), "Log Correlation has been enabled")
+
+ } else {
+ // Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+ if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+ lfm.replaceActiveTracer(opentracing.NoopTracer{})
+ }
+
+ lfm.isLogCorrelationEnabled = false
+ logger.Info(context.Background(), "Log Correlation has been disabled")
+ }
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+ lfm.lock.Lock()
+ defer lfm.lock.Unlock()
+
+ if isEnabled == lfm.isTracePublishingEnabled {
+ logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+ return
+ }
+
+ if isEnabled {
+ // Construct new Tracer instance if Tracing Publish has been enabled (even if a Jaeger instance is already active)
+ // This is needed to ensure that a fresh lookup of Jaeger Agent address is performed again while performing
+ // Disable-Enable of Tracing
+ tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+ return
+ }
+ lfm.replaceActiveTracer(tracer)
+
+ lfm.isTracePublishingEnabled = true
+ logger.Info(context.Background(), "Tracing Publishing has been enabled")
+ } else {
+ // Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+ if !lfm.isLogCorrelationEnabled {
+ lfm.replaceActiveTracer(opentracing.NoopTracer{})
+ } else {
+ // Else construct a new Jaeger Instance with publishing disabled
+ tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+ if err != nil {
+ logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+ return
+ }
+ lfm.replaceActiveTracer(tracer)
+ }
+
+ lfm.isTracePublishingEnabled = false
+ logger.Info(context.Background(), "Tracing Publishing has been disabled")
+ }
+}
+
+// constructJaegerTracer constructs a new Jaeger Tracer instance based on the given Trace Agent address and Publish status.
+// The last attribute indicates whether to use the loopback IP for creating the Jaeger client when the DNS lookup
+// of the supplied Trace Agent address has failed. It is fine to fall back during the initialization step, but
+// not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+ cfg := jcfg.Configuration{ServiceName: lfm.componentName}
+
+ var err error
+ var jReporterConfig jcfg.ReporterConfig
+ var jReporterCfgOption jtracing.Reporter
+
+ logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
+
+	// Attempt the Trace Agent address first; will fall back to the loopback IP if it fails
+ jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+ jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+ if err != nil {
+ if !fallbackToLoopbackAllowed {
+ return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
+ }
+
+ logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+ Fields{"error": err, "address": traceAgentAddress})
+		// The Reporter initialization may fail due to an invalid Agent address or a non-existent Agent (DNS lookup failure).
+		// It is essential for the Tracer instance to still start for the correct Span propagation needed for log correlation.
+		// Thus, fall back to using the loopback IP for Reporter initialization before throwing back any error
+ tracePublishEnabled = false
+
+ jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
+ jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+ if err != nil {
+ return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+ }
+ }
+
+ // To start with, we are using Constant Sampling type
+ samplerParam := 0 // 0: Do not publish span, 1: Publish
+ if tracePublishEnabled {
+ samplerParam = 1
+ }
+ jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
+ jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
+ if err != nil {
+ return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
+ }
+
+ return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+}
+
+func TerminateTracing(c io.Closer) {
+ err := c.Close()
+ if err != nil {
+ logger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+ }
+}
+
+// ExtractContextAttributes extracts details of the execution context as log fields from the Tracing Span injected into
+// the context instance. The following log fields are extracted:
+// 1. Operation Name : key as 'op-name' and value as Span operation name
+// 2. Operation Id : key as 'op-id' and value as 64 bit Span Id in hex digits string
+//
+// Additionally, any tags present in Span are also extracted to use as log fields e.g. device-id.
+//
+// If no Span is found associated with the context, an empty slice is returned without any log fields
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+ if !lfm.isLogCorrelationEnabled {
+ return make([]interface{}, 0)
+ }
+
+ attrMap := make(map[string]interface{})
+
+ if ctx != nil {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ if jspan, ok := span.(*jtracing.Span); ok {
+ // Add Log fields for operation identified by Root Level Span (Trace)
+ opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
+ opName := jspan.BaggageItem(RootSpanNameKey)
+
+ taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
+ taskName := jspan.OperationName()
+
+ if opName == "" {
+ span.SetBaggageItem(RootSpanNameKey, taskName)
+ opName = taskName
+ }
+
+ attrMap["op-id"] = opId
+ attrMap["op-name"] = opName
+
+ // Add Log fields for task identified by Current Span, if it is different
+ // than operation
+ if taskId != opId {
+ attrMap["task-id"] = taskId
+ attrMap["task-name"] = taskName
+ }
+
+ for k, v := range jspan.Tags() {
+ // Ignore the special tags added by Jaeger, middleware (sampler.type, span.*) present in the span
+ if strings.HasPrefix(k, "sampler.") || strings.HasPrefix(k, "span.") || k == "component" {
+ continue
+ }
+
+ attrMap[k] = v
+ }
+
+ processBaggageItems := func(k, v string) bool {
+ if k != "rpc-span-name" {
+ attrMap[k] = v
+ }
+ return true
+ }
+
+ jspan.SpanContext().ForeachBaggageItem(processBaggageItems)
+ }
+ }
+ }
+
+ return serializeMap(attrMap)
+}
+
+// EnrichSpan injects additional log fields into the Span, e.g. device-id
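+//
+// For example, an illustrative sketch (the field value is hypothetical; baggage values must be strings):
+//
+//	log.EnrichSpan(ctx, log.Fields{"device-id": "0001deadbeef"})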
+func EnrichSpan(ctx context.Context, keyAndValues ...Fields) {
+ span := opentracing.SpanFromContext(ctx)
+ if span != nil {
+ if jspan, ok := span.(*jtracing.Span); ok {
+ // Inject as a BaggageItem when the Span is the Root Span so that it propagates
+ // across the components along with Root Span (called as Trace)
+ // Else, inject as a Tag so that it is attached to the Child Task
+ isRootSpan := false
+ if jspan.SpanContext().TraceID().String() == jspan.SpanContext().SpanID().String() {
+ isRootSpan = true
+ }
+
+ for _, field := range keyAndValues {
+ for k, v := range field {
+ if isRootSpan {
+ span.SetBaggageItem(k, v.(string))
+ } else {
+ span.SetTag(k, v)
+ }
+ }
+ }
+ }
+ }
+}
+
+// MarkSpanError injects an error into the Span in the event of any operation failure
+func MarkSpanError(ctx context.Context, err error) {
+ span := opentracing.SpanFromContext(ctx)
+ if span != nil {
+ span.SetTag("error", true)
+ span.SetTag("err", err)
+ }
+}
+
+// CreateChildSpan creates a child span from the parent span embedded in the passed context. It should be used before
+// starting a new major operation in synchronous or asynchronous mode (go routine), such as the following:
+// 1. Start of all implemented External API methods unless using an interceptor for auto-injection of Span (Server side impl)
+// 2. Just before calling a third-party lib which invokes an external API (etcd, kafka)
+// 3. At the start of a go routine responsible for performing a major task involving significant duration
+// 4. Any method which is suspected to be time consuming
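+//
+// For example, an illustrative sketch (the task name and deviceID are hypothetical):
+//
+//	span, ctx := log.CreateChildSpan(ctx, "process-flow-update", log.Fields{"device-id": deviceID})
+//	defer span.Finish()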
+func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+ if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+ return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+ }
+
+ parentSpan := opentracing.SpanFromContext(ctx)
+ childSpan, newCtx := opentracing.StartSpanFromContext(ctx, taskName)
+
+ if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+ childSpan.SetBaggageItem(RootSpanNameKey, taskName)
+ }
+
+ EnrichSpan(newCtx, keyAndValues...)
+ return childSpan, newCtx
+}
+
+// CreateAsyncSpan creates an async child span with a Follows-From relationship from the parent span embedded in the
+// passed context. It should be used only in scenarios when
+// a) There is a discontinuity in execution, and thus the result of the child span does not affect the parent flow at all
+// b) The execution of the child span is guaranteed to start after the completion of the parent span
+// In case of any confusion, use the CreateChildSpan method.
+// Some situations where this method would be suitable include a Kafka async RPC call, propagation of an event across
+// a channel, etc.
+func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+ if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+ return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+ }
+
+ var asyncSpan opentracing.Span
+ var newCtx context.Context
+
+ parentSpan := opentracing.SpanFromContext(ctx)
+
+	// We should always be creating an async span from a valid parent span. If not, create a child span instead
+ if parentSpan == nil {
+ logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+ asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
+ } else {
+		// Use the Background context as the base for the Follows-From case; otherwise the new span gets both ChildOf and FollowsFrom relationships
+ asyncSpan, newCtx = opentracing.StartSpanFromContext(context.Background(), taskName, opentracing.FollowsFrom(parentSpan.Context()))
+ }
+
+ if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+ asyncSpan.SetBaggageItem(RootSpanNameKey, taskName)
+ }
+
+ EnrichSpan(newCtx, keyAndValues...)
+ return asyncSpan, newCtx
+}
+
+// WithSpanFromContext extracts the span from the source context and injects it into the supplied target context.
+// This should be used in situations wherein we are calling a time-sensitive operation (etcd update) and hence
+// used a context.Background() earlier to avoid any cancellation/timeout of the operation by the passed context.
+// This allows propagation of the span with a different base context (and not the original context)
+func WithSpanFromContext(targetCtx, sourceCtx context.Context) context.Context {
+ span := opentracing.SpanFromContext(sourceCtx)
+ return opentracing.ContextWithSpan(targetCtx, span)
+}
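A hedged sketch of the decoupling pattern this helper supports, reusing the imports above; the `KVStore` interface is a hypothetical stand-in for an etcd-like client:

```go
// KVStore is a hypothetical minimal etcd-like client interface.
type KVStore interface {
	Put(ctx context.Context, key, value string) error
}

func persistState(ctx context.Context, kv KVStore, key, value string) error {
	// Base the write on context.Background() so the caller's cancellation
	// or timeout cannot abort it, while still propagating the span.
	updateCtx := log.WithSpanFromContext(context.Background(), ctx)
	return kv.Put(updateCtx, key, value)
}
```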
+
+// serializeMap converts log Fields into the flat slice of interfaces expected by the zap logger methods
+func serializeMap(fields Fields) []interface{} {
+ data := make([]interface{}, len(fields)*2)
+ i := 0
+ for k, v := range fields {
+ data[i] = k
+ data[i+1] = v
+ i = i + 2
+ }
+ return data
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
new file mode 100644
index 0000000..6508fd4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+ "github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+ // Set up this package so that its log level can be modified at run time
+ var err error
+ logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
new file mode 100644
index 0000000..7ba1a57
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "sync"
+
+ "github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+// ProbeContextKeyType is the type of the context key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus typed values for service status
+type ServiceStatus int
+
+const (
+ // ServiceStatusUnknown initial state of services
+ ServiceStatusUnknown ServiceStatus = iota
+
+ // ServiceStatusPreparing to optionally be used for prep, such as connecting
+ ServiceStatusPreparing
+
+ // ServiceStatusPrepared to optionally be used when prep is complete, but before run
+ ServiceStatusPrepared
+
+ // ServiceStatusRunning service is functional
+ ServiceStatusRunning
+
+ // ServiceStatusStopped service has stopped, but not because of error
+ ServiceStatusStopped
+
+ // ServiceStatusFailed service has stopped because of an error
+ ServiceStatusFailed
+
+ // ServiceStatusNotReady service has started but is unable to accept requests
+ ServiceStatusNotReady
+)
+
+const (
+ // ProbeContextKey value of context key to fetch probe
+ ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String converts ServiceStatus values to strings
+func (s ServiceStatus) String() string {
+ switch s {
+ default:
+ fallthrough
+ case ServiceStatusUnknown:
+ return "Unknown"
+ case ServiceStatusPreparing:
+ return "Preparing"
+ case ServiceStatusPrepared:
+ return "Prepared"
+ case ServiceStatusRunning:
+ return "Running"
+ case ServiceStatusStopped:
+ return "Stopped"
+ case ServiceStatusFailed:
+ return "Failed"
+ case ServiceStatusNotReady:
+ return "NotReady"
+ }
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+ Name string
+ Status ServiceStatus
+}
+
+// Probe is the receiver on which probe capabilities are implemented
+type Probe struct {
+ readyFunc func(map[string]ServiceStatus) bool
+ healthFunc func(map[string]ServiceStatus) bool
+
+ mutex sync.RWMutex
+ status map[string]ServiceStatus
+ isReady bool
+ isHealthy bool
+}
+
+// WithReadyFunc overrides the default ready calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+ p.readyFunc = readyFunc
+ return p
+}
+
+// WithHealthFunc overrides the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+ p.healthFunc = healthFunc
+ return p
+}
+
+// RegisterService registers one or more service names with the probe; status will be tracked against each service name
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ if p.status == nil {
+ p.status = make(map[string]ServiceStatus)
+ }
+ for _, name := range names {
+ if _, ok := p.status[name]; !ok {
+ p.status[name] = ServiceStatusUnknown
+ logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
+ }
+ }
+
+ if p.readyFunc != nil {
+ p.isReady = p.readyFunc(p.status)
+ } else {
+ p.isReady = defaultReadyFunc(p.status)
+ }
+
+ if p.healthFunc != nil {
+ p.isHealthy = p.healthFunc(p.status)
+ } else {
+ p.isHealthy = defaultHealthFunc(p.status)
+ }
+}
+
+// UpdateStatus is a utility function to send a service status update to the probe
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+ if p.status == nil {
+ p.status = make(map[string]ServiceStatus)
+ }
+
+ // if status hasn't changed, avoid doing useless work
+ existingStatus, ok := p.status[name]
+ if ok && (existingStatus == status) {
+ return
+ }
+
+ p.status[name] = status
+ if p.readyFunc != nil {
+ p.isReady = p.readyFunc(p.status)
+ } else {
+ p.isReady = defaultReadyFunc(p.status)
+ }
+
+ if p.healthFunc != nil {
+ p.isHealthy = p.healthFunc(p.status)
+ } else {
+ p.isHealthy = defaultHealthFunc(p.status)
+ }
+ logger.Debugw(ctx, "probe-service-status-updated",
+ log.Fields{
+ "service-name": name,
+ "status": status.String(),
+ "ready": p.isReady,
+ "health": p.isHealthy,
+ })
+}
+
+// GetStatus returns the current status of the named service, or ServiceStatusUnknown if it is not registered
+func (p *Probe) GetStatus(name string) ServiceStatus {
+ p.mutex.Lock()
+ defer p.mutex.Unlock()
+
+ if p.status == nil {
+ p.status = make(map[string]ServiceStatus)
+ }
+
+ currentStatus, ok := p.status[name]
+ if ok {
+ return currentStatus
+ }
+
+ return ServiceStatusUnknown
+}
+
+// GetProbeFromContext returns the Probe associated with ProbeContextKey in the given context, or nil if none is set
+func GetProbeFromContext(ctx context.Context) *Probe {
+ if ctx != nil {
+ if value := ctx.Value(ProbeContextKey); value != nil {
+ if p, ok := value.(*Probe); ok {
+ return p
+ }
+ }
+ }
+ return nil
+}
+
+// UpdateStatusFromContext is a convenience function that pulls the Probe reference from the
+// context, if it exists, and then calls UpdateStatus on that Probe reference. If the context
+// is nil, or no Probe reference is associated with the ProbeContextKey, nothing happens.
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+ p := GetProbeFromContext(ctx)
+ if p != nil {
+ p.UpdateStatus(ctx, name, status)
+ }
+}
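Putting the pieces above together, a minimal sketch of the registration/update lifecycle; the service names are illustrative:

```go
package example

import (
	"context"

	"github.com/opencord/voltha-lib-go/v7/pkg/probe"
)

func setupProbe() context.Context {
	p := &probe.Probe{}
	// Make the probe reachable by anything that receives this context.
	ctx := context.WithValue(context.Background(), probe.ProbeContextKey, p)
	// Track two services; both start as ServiceStatusUnknown.
	p.RegisterService(ctx, "kafka", "kv-store")
	// Later, from anywhere that holds the context:
	probe.UpdateStatusFromContext(ctx, "kafka", probe.ServiceStatusRunning)
	return ctx
}
```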
+
+// readzFunc is pulled out as a method to better enable unit testing
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ if p.isReady {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusTeapot)
+ }
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ if p.isHealthy {
+ w.WriteHeader(http.StatusOK)
+ } else {
+ w.WriteHeader(http.StatusTeapot)
+ }
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+ ctx := context.Background()
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ w.Header().Set("Content-Type", "application/json")
+ if _, err := w.Write([]byte("{")); err != nil {
+ logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ comma := ""
+ for c, s := range p.status {
+ if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+ logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ comma = ", "
+ }
+ if _, err := w.Write([]byte("}")); err != nil {
+ logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+}
+
+// ListenAndServe implements 3 HTTP endpoints on the given address for healthz, readz, and detailz. It returns only on error.
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
+ mux := http.NewServeMux()
+
+ // Returns the result of the readyFunc calculation
+ mux.HandleFunc("/readz", p.readzFunc)
+
+ // Returns the result of the healthFunc calculation
+ mux.HandleFunc("/healthz", p.healthzFunc)
+
+ // Returns the details of the services and their status as JSON
+ mux.HandleFunc("/detailz", p.detailzFunc)
+ s := &http.Server{
+ Addr: address,
+ Handler: mux,
+ }
+ logger.Fatal(ctx, s.ListenAndServe())
+}
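Continuing the sketch above, the endpoints are typically served on a background goroutine; the port is illustrative:

```go
func serveProbe(ctx context.Context, p *probe.Probe) {
	// ListenAndServe blocks until the HTTP server fails, so run it on its
	// own goroutine. /readz and /healthz answer 200 when ready/healthy and
	// 418 (teapot) otherwise; /detailz returns the per-service map as JSON.
	go p.ListenAndServe(ctx, ":8080")
}
```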
+
+// IsReady returns whether the probe currently considers the component ready
+func (p *Probe) IsReady() bool {
+ p.mutex.RLock()
+ defer p.mutex.RUnlock()
+ return p.isReady
+}
+
+// defaultReadyFunc returns ready only when all services are running
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+ if len(services) == 0 {
+ return false
+ }
+ for _, status := range services {
+ if status != ServiceStatusRunning {
+ return false
+ }
+ }
+ return true
+}
+
+// defaultHealthFunc returns healthy as long as no service is stopped or failed.
+// Services start as unknown, so they are considered healthy.
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+ if len(services) == 0 {
+ return false
+ }
+ for _, status := range services {
+ if status == ServiceStatusStopped || status == ServiceStatusFailed {
+ return false
+ }
+ }
+ return true
+}
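The default policies can be replaced via the With* builders; a sketch of a hypothetical custom readiness policy, assuming the probe import shown earlier:

```go
func customProbe() *probe.Probe {
	// Hypothetical policy: ready as soon as the kv-store is running,
	// regardless of any other registered service.
	return (&probe.Probe{}).WithReadyFunc(func(services map[string]probe.ServiceStatus) bool {
		return services["kv-store"] == probe.ServiceStatusRunning
	})
}
```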
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/version/version.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/version/version.go
new file mode 100644
index 0000000..49c0b10
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/version/version.go
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package version
+
+import (
+ "fmt"
+ "strings"
+)
+
+// Default build-time variables.
+// These values can (and should) be overridden via ldflags when built with
+// `make`
+var (
+ version = "unknown-version"
+ goVersion = "unknown-goversion"
+ vcsRef = "unknown-vcsref"
+ vcsDirty = "unknown-vcsdirty"
+ buildTime = "unknown-buildtime"
+ os = "unknown-os"
+ arch = "unknown-arch"
+)
+
+type VersionInfoType struct {
+ Version string `json:"version"`
+ GoVersion string `json:"goversion"`
+ VcsRef string `json:"vcsref"`
+ VcsDirty string `json:"vcsdirty"`
+ BuildTime string `json:"buildtime"`
+ Os string `json:"os"`
+ Arch string `json:"arch"`
+}
+
+var VersionInfo VersionInfoType
+
+func init() {
+ VersionInfo = VersionInfoType{
+ Version: version,
+ VcsRef: vcsRef,
+ VcsDirty: vcsDirty,
+ GoVersion: goVersion,
+ Os: os,
+ Arch: arch,
+ BuildTime: buildTime,
+ }
+}
+
+func (v VersionInfoType) String(indent string) string {
+ builder := strings.Builder{}
+
+ builder.WriteString(fmt.Sprintf("%sVersion: %s\n", indent, v.Version))
+ builder.WriteString(fmt.Sprintf("%sGoVersion: %s\n", indent, v.GoVersion))
+ builder.WriteString(fmt.Sprintf("%sVCS Ref: %s\n", indent, v.VcsRef))
+ builder.WriteString(fmt.Sprintf("%sVCS Dirty: %s\n", indent, v.VcsDirty))
+ builder.WriteString(fmt.Sprintf("%sBuilt: %s\n", indent, v.BuildTime))
+ builder.WriteString(fmt.Sprintf("%sOS/Arch: %s/%s\n", indent, v.Os, v.Arch))
+ return builder.String()
+}
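A minimal sketch of consuming this package; the version value in the -X flag comment is illustrative:

```go
package main

import (
	"fmt"

	"github.com/opencord/voltha-lib-go/v7/pkg/version"
)

func main() {
	// Prints the struct populated in init(); values stay at their
	// "unknown-*" defaults unless overridden at build time, e.g. with
	// -ldflags "-X github.com/opencord/voltha-lib-go/v7/pkg/version.version=2.9.0"
	fmt.Print(version.VersionInfo.String("  "))
}
```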
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 0000000..c57100a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 0000000..b950e42
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+ include:
+ - go: "1.13.x"
+ - go: "1.14.x"
+ - go: "tip"
+ env:
+ - LINT=true
+ - COVERAGE=true
+
+install:
+ - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+ - go get -u github.com/stretchr/testify/...
+
+script:
+ - make test
+ - go build ./...
+ - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+ - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 0000000..d3bfcf6
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,63 @@
+Changes by Version
+==================
+
+
+1.2.0 (2020-07-01)
+-------------------
+
+* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
+* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
+* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
+* Add Go module support (#215) -- Zaba505
+* Make SetTag helper types in ext public (#229) -- Blake Edwards
+* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
+* Improve noop implementation (#223) -- chanxuehong
+* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
+* Fix typo in comments (#222) -- meteorlxy
+* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
+* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
+
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- The use of Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
+- Update license to Apache 2.0 (#181) <Andrea Kao>
+- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
+- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
+- Fix race condition in MockSpan.Context() (#170) <Brad>
+- Add PeerHostIPv4.SetString() (#155) <NeoCN>
+- Add a Noop log field type to log to allow for optional fields (#150) <Matt Ho>
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139) <Rustam Zagirov>
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments <Ben Sigelman>
+- Address race in nextMockID() (#123) <bill fumerola>
+- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
+- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
+- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
+- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 0000000..f002734
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016 The OpenTracing Authors
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 0000000..62abb63
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+ go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+ go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+ go fmt ./...
+ golint ./...
+ @# Run again with magic to exit non-zero if golint outputs anything.
+ @! (golint ./... | read dummy)
+ go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 0000000..6ef1d7c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+In order to understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) and, more specifically, its
+[terminology](https://opentracing.io/specification/).
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./default_tracer.go`. As early as possible, call
+
+```go
+ import "github.com/opentracing/opentracing-go"
+ import ".../some_tracing_impl"
+
+ func main() {
+ opentracing.SetGlobalTracer(
+ // tracing impl specific:
+ some_tracing_impl.New(...),
+ )
+ ...
+ }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
+
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+ func xyz(ctx context.Context, ...) {
+ ...
+ span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+ defer span.Finish()
+ span.LogFields(
+ log.String("event", "soft error"),
+ log.String("type", "cache timeout"),
+ log.Int("waited.millis", 1500))
+ ...
+ }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+ func xyz() {
+ ...
+ sp := opentracing.StartSpan("operation_name")
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+ func xyz(parentSpan opentracing.Span, ...) {
+ ...
+ sp := opentracing.StartSpan(
+ "operation_name",
+ opentracing.ChildOf(parentSpan.Context()))
+ defer sp.Finish()
+ ...
+ }
+```
+
+#### Serializing to the wire
+
+```go
+ func makeSomeRequest(ctx context.Context) ... {
+ if span := opentracing.SpanFromContext(ctx); span != nil {
+ httpClient := &http.Client{}
+ httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+ // Transmit the span's TraceContext as HTTP headers on our
+ // outbound request.
+ opentracing.GlobalTracer().Inject(
+ span.Context(),
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+ resp, err := httpClient.Do(httpReq)
+ ...
+ }
+ ...
+ }
+```
+
+#### Deserializing from the wire
+
+```go
+ http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+ var serverSpan opentracing.Span
+ appSpecificOperationName := ...
+ wireContext, err := opentracing.GlobalTracer().Extract(
+ opentracing.HTTPHeaders,
+ opentracing.HTTPHeadersCarrier(req.Header))
+ if err != nil {
+ // Optionally record something about err here
+ }
+
+ // Create the span referring to the RPC client if available.
+ // If wireContext == nil, a root span will be created.
+ serverSpan = opentracing.StartSpan(
+ appSpecificOperationName,
+ ext.RPCServerOption(wireContext))
+
+ defer serverSpan.Finish()
+
+ ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+ ...
+ }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field. For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+ func Customer(order *Order) log.Field {
+ if os.Getenv("ENVIRONMENT") == "dev" {
+ return log.String("customer", order.Customer.ID)
+ }
+ return log.Noop()
+ }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
new file mode 100644
index 0000000..e11977e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext.go
@@ -0,0 +1,24 @@
+package opentracing
+
+import (
+ "context"
+)
+
+// TracerContextWithSpanExtension is an extension interface that an
+// implementation of the Tracer interface may want to implement. It
+// gives the implementation some control over the Go context when
+// ContextWithSpan is invoked.
+//
+// The primary purpose of this extension is adapters from the opentracing
+// API to some other tracing API.
+type TracerContextWithSpanExtension interface {
+ // ContextWithSpanHook gets called by the ContextWithSpan
+ // function, when the Tracer implementation also implements
+ // this interface. It allows the tracer to put extra information
+ // into the context and make it available to the callers of
+ // ContextWithSpan.
+ //
+ // This hook is invoked before the ContextWithSpan function
+ // actually puts the span into the context.
+ ContextWithSpanHook(ctx context.Context, span Span) context.Context
+}
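A minimal sketch of a tracer adapter implementing this extension; `hookedTracer` and `extraKey` are hypothetical names:

```go
package example

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

type extraKey struct{}

// hookedTracer is a hypothetical adapter that, on top of the embedded
// Tracer's behavior, stores extra state whenever ContextWithSpan is called.
type hookedTracer struct {
	opentracing.Tracer
}

func (t hookedTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
	// Runs before ContextWithSpan stores the span itself.
	return context.WithValue(ctx, extraKey{}, span.Context())
}
```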
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
new file mode 100644
index 0000000..8282bd7
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go
@@ -0,0 +1,17 @@
+package ext
+
+import (
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// LogError sets the error=true tag on the Span and logs err as an "error" event.
+func LogError(span opentracing.Span, err error, fields ...log.Field) {
+ Error.Set(span, true)
+ ef := []log.Field{
+ log.Event("error"),
+ log.Error(err),
+ }
+ ef = append(ef, fields...)
+ span.LogFields(ef...)
+}
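A short usage sketch, assuming the usual `opentracing`, `ext`, and `log` imports; `fetchUser` and the extra field are hypothetical:

```go
// fetchUser is a hypothetical operation that may fail.
func fetchUser() error { return nil }

func tracedFetch() {
	span := opentracing.StartSpan("fetch-user")
	defer span.Finish()
	if err := fetchUser(); err != nil {
		// Sets error=true on the span and logs event="error" plus
		// error.object=err, along with any extra fields supplied here.
		ext.LogError(span, err, log.String("retryable", "false"))
	}
}
```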
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 0000000..a414b59
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,215 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+// span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+// TagName.Set(span, value)
+//
+var (
+ //////////////////////////////////////////////////////////////////////
+ // SpanKind (client/server or producer/consumer)
+ //////////////////////////////////////////////////////////////////////
+
+ // SpanKind hints at relationship between spans, e.g. client/server
+ SpanKind = spanKindTagName("span.kind")
+
+ // SpanKindRPCClient marks a span representing the client-side of an RPC
+ // or other remote call
+ SpanKindRPCClientEnum = SpanKindEnum("client")
+ SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+ // SpanKindRPCServer marks a span representing the server-side of an RPC
+ // or other remote call
+ SpanKindRPCServerEnum = SpanKindEnum("server")
+ SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+ // SpanKindProducer marks a span representing the producer-side of a
+ // message bus
+ SpanKindProducerEnum = SpanKindEnum("producer")
+ SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+ // SpanKindConsumer marks a span representing the consumer-side of a
+ // message bus
+ SpanKindConsumerEnum = SpanKindEnum("consumer")
+ SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+ //////////////////////////////////////////////////////////////////////
+ // Component name
+ //////////////////////////////////////////////////////////////////////
+
+ // Component is a low-cardinality identifier of the module, library,
+ // or package that is generating a span.
+ Component = StringTagName("component")
+
+ //////////////////////////////////////////////////////////////////////
+ // Sampling hint
+ //////////////////////////////////////////////////////////////////////
+
+ // SamplingPriority determines the priority of sampling this Span.
+ SamplingPriority = Uint16TagName("sampling.priority")
+
+ //////////////////////////////////////////////////////////////////////
+ // Peer tags. These tags can be emitted by either client-side or
+ // server-side to describe the other side/service in a peer-to-peer
+ // communications, like an RPC call.
+ //////////////////////////////////////////////////////////////////////
+
+ // PeerService records the service name of the peer.
+ PeerService = StringTagName("peer.service")
+
+ // PeerAddress records the address name of the peer. This may be an "ip:port",
+ // a bare "hostname", an FQDN, or even a database DSN substring
+ // like "mysql://username@127.0.0.1:3306/dbname"
+ PeerAddress = StringTagName("peer.address")
+
+ // PeerHostname records the host name of the peer
+ PeerHostname = StringTagName("peer.hostname")
+
+ // PeerHostIPv4 records IP v4 host address of the peer
+ PeerHostIPv4 = IPv4TagName("peer.ipv4")
+
+ // PeerHostIPv6 records IP v6 host address of the peer
+ PeerHostIPv6 = StringTagName("peer.ipv6")
+
+ // PeerPort records port number of the peer
+ PeerPort = Uint16TagName("peer.port")
+
+ //////////////////////////////////////////////////////////////////////
+ // HTTP Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // HTTPUrl should be the URL of the request being handled in this segment
+ // of the trace, in standard URI format. The protocol is optional.
+ HTTPUrl = StringTagName("http.url")
+
+ // HTTPMethod is the HTTP method of the request, and is case-insensitive.
+ HTTPMethod = StringTagName("http.method")
+
+ // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+ // HTTP response.
+ HTTPStatusCode = Uint16TagName("http.status_code")
+
+ //////////////////////////////////////////////////////////////////////
+ // DB Tags
+ //////////////////////////////////////////////////////////////////////
+
+ // DBInstance is database instance name.
+ DBInstance = StringTagName("db.instance")
+
+ // DBStatement is a database statement for the given database type.
+ // It can be a query or a prepared statement (i.e., before substitution).
+ DBStatement = StringTagName("db.statement")
+
+ // DBType is a database type. For any SQL database, "sql".
+ // For others, the lower-case database category, e.g. "redis"
+ DBType = StringTagName("db.type")
+
+ // DBUser is a username for accessing database.
+ DBUser = StringTagName("db.user")
+
+ //////////////////////////////////////////////////////////////////////
+ // Message Bus Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // MessageBusDestination is an address at which messages can be exchanged
+ MessageBusDestination = StringTagName("message_bus.destination")
+
+ //////////////////////////////////////////////////////////////////////
+ // Error Tag
+ //////////////////////////////////////////////////////////////////////
+
+ // Error indicates that operation represented by the span resulted in an error.
+ Error = BoolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+ span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+ clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+ if r.clientContext != nil {
+ opentracing.ChildOf(r.clientContext).Apply(o)
+ }
+ SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+ return rpcServerOption{client}
+}
+
+// ---
+
+// StringTagName is a common tag name to be set to a string value
+type StringTagName string
+
+// Set adds a string tag to the `span`
+func (tag StringTagName) Set(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint32TagName is a common tag name to be set to a uint32 value
+type Uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint16TagName is a common tag name to be set to a uint16 value
+type Uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
+ span.SetTag(string(tag), value)
+}
+
+// ---
+
+// BoolTagName is a common tag name to be set to a bool value
+type BoolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag BoolTagName) Set(span opentracing.Span, value bool) {
+ span.SetTag(string(tag), value)
+}
+
+// IPv4TagName is a common tag name to be set to an ipv4 value
+type IPv4TagName string
+
+// Set adds the IP v4 host address of the peer as a uint32 value to the `span`; kept for backward and Zipkin compatibility
+func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
+ span.SetTag(string(tag), value)
+}
+
+// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1"
+func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
+ span.SetTag(string(tag), value)
+}
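A short sketch of the typed-setter syntax described above, assuming the usual `opentracing` and `ext` imports:

```go
func tagDemo() {
	sp := opentracing.StartSpan("http-request")
	defer sp.Finish()
	// Typed setters reject values of the wrong type at compile time,
	// unlike the stringly-typed span.SetTag(key, value) form.
	ext.SpanKind.Set(sp, ext.SpanKindRPCClientEnum)
	ext.HTTPMethod.Set(sp, "GET")
	ext.HTTPStatusCode.Set(sp, 200)
	ext.PeerHostIPv4.SetString(sp, "127.0.0.1")
}
```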
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 0000000..4f7066a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+ tracer Tracer
+ isRegistered bool
+}
+
+var (
+ globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+ globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+ return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+ SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+ return globalTracer.isRegistered
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod
new file mode 100644
index 0000000..bf48bb5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.mod
@@ -0,0 +1,5 @@
+module github.com/opentracing/opentracing-go
+
+go 1.14
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum
new file mode 100644
index 0000000..4347755
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 0000000..1831bc9
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,65 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// the span. If span is nil, a new context without an active span is returned.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+ if span != nil {
+ if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
+ ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
+ }
+ }
+ return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+ val := ctx.Value(activeSpanKey)
+ if sp, ok := val.(Span); ok {
+ return sp
+ }
+ return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+// SomeFunction(ctx context.Context, ...) {
+// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+// defer sp.Finish()
+// ...
+// }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If that doesn't exist
+// it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext, except that it takes an explicit
+// tracer as opposed to using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+ if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+ opts = append(opts, ChildOf(parentSpan.Context()))
+ }
+ span := tracer.StartSpan(operationName, opts...)
+ return span, ContextWithSpan(ctx, span)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 0000000..f222ded
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,282 @@
+package log
+
+import (
+ "fmt"
+ "math"
+)
+
+type fieldType int
+
+const (
+ stringType fieldType = iota
+ boolType
+ intType
+ int32Type
+ uint32Type
+ int64Type
+ uint64Type
+ float32Type
+ float64Type
+ errorType
+ objectType
+ lazyLoggerType
+ noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+ key string
+ fieldType fieldType
+ numericVal int64
+ stringVal string
+ interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+ return Field{
+ key: key,
+ fieldType: stringType,
+ stringVal: val,
+ }
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+ var numericVal int64
+ if val {
+ numericVal = 1
+ }
+ return Field{
+ key: key,
+ fieldType: boolType,
+ numericVal: numericVal,
+ }
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+ return Field{
+ key: key,
+ fieldType: intType,
+ numericVal: int64(val),
+ }
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+ return Field{
+ key: key,
+ fieldType: int32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+ return Field{
+ key: key,
+ fieldType: int64Type,
+ numericVal: val,
+ }
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+ return Field{
+ key: key,
+ fieldType: uint32Type,
+ numericVal: int64(val),
+ }
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+ return Field{
+ key: key,
+ fieldType: uint64Type,
+ numericVal: int64(val),
+ }
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+ return Field{
+ key: key,
+ fieldType: float32Type,
+ numericVal: int64(math.Float32bits(val)),
+ }
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+ return Field{
+ key: key,
+ fieldType: float64Type,
+ numericVal: int64(math.Float64bits(val)),
+ }
+}
+
+// Error adds an error with the key "error.object" to a Span.LogFields() record
+func Error(err error) Field {
+ return Field{
+ key: "error.object",
+ fieldType: errorType,
+ interfaceVal: err,
+ }
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record
+// Please pass in an immutable object, otherwise there may be concurrency issues.
+// For example, passing in a map may result in "fatal error: concurrent map iteration and map write",
+// because the span is sent asynchronously and the map may still be modified concurrently.
+func Object(key string, obj interface{}) Field {
+ return Field{
+ key: key,
+ fieldType: objectType,
+ interfaceVal: obj,
+ }
+}
+
+// Event creates a string-valued Field for span logs with key="event" and value=val.
+func Event(val string) Field {
+ return String("event", val)
+}
+
+// Message creates a string-valued Field for span logs with key="message" and value=val.
+func Message(val string) Field {
+ return String("message", val)
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+ return Field{
+ fieldType: lazyLoggerType,
+ interfaceVal: ll,
+ }
+}
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environment:
+//
+// func customerField(order *Order) log.Field {
+// if os.Getenv("ENVIRONMENT") == "dev" {
+// return log.String("customer", order.Customer.ID)
+// }
+// return log.Noop()
+// }
+//
+// span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+ return Field{
+ fieldType: noopType,
+ }
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+ EmitString(key, value string)
+ EmitBool(key string, value bool)
+ EmitInt(key string, value int)
+ EmitInt32(key string, value int32)
+ EmitInt64(key string, value int64)
+ EmitUint32(key string, value uint32)
+ EmitUint64(key string, value uint64)
+ EmitFloat32(key string, value float32)
+ EmitFloat64(key string, value float64)
+ EmitObject(key string, value interface{})
+ EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+ switch lf.fieldType {
+ case stringType:
+ visitor.EmitString(lf.key, lf.stringVal)
+ case boolType:
+ visitor.EmitBool(lf.key, lf.numericVal != 0)
+ case intType:
+ visitor.EmitInt(lf.key, int(lf.numericVal))
+ case int32Type:
+ visitor.EmitInt32(lf.key, int32(lf.numericVal))
+ case int64Type:
+ visitor.EmitInt64(lf.key, int64(lf.numericVal))
+ case uint32Type:
+ visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+ case uint64Type:
+ visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+ case float32Type:
+ visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+ case float64Type:
+ visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+ case errorType:
+ if err, ok := lf.interfaceVal.(error); ok {
+ visitor.EmitString(lf.key, err.Error())
+ } else {
+ visitor.EmitString(lf.key, "<nil>")
+ }
+ case objectType:
+ visitor.EmitObject(lf.key, lf.interfaceVal)
+ case lazyLoggerType:
+ visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+ case noopType:
+ // intentionally left blank
+ }
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+ return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+ switch lf.fieldType {
+ case stringType:
+ return lf.stringVal
+ case boolType:
+ return lf.numericVal != 0
+ case intType:
+ return int(lf.numericVal)
+ case int32Type:
+ return int32(lf.numericVal)
+ case int64Type:
+ return int64(lf.numericVal)
+ case uint32Type:
+ return uint32(lf.numericVal)
+ case uint64Type:
+ return uint64(lf.numericVal)
+ case float32Type:
+ return math.Float32frombits(uint32(lf.numericVal))
+ case float64Type:
+ return math.Float64frombits(uint64(lf.numericVal))
+ case errorType, objectType, lazyLoggerType:
+ return lf.interfaceVal
+ case noopType:
+ return nil
+ default:
+ return nil
+ }
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+ return fmt.Sprint(lf.key, ":", lf.Value())
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 0000000..d57e28a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,61 @@
+package log
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+ if len(keyValues)%2 != 0 {
+ return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+ }
+ fields := make([]Field, len(keyValues)/2)
+ for i := 0; i*2 < len(keyValues); i++ {
+ key, ok := keyValues[i*2].(string)
+ if !ok {
+ return nil, fmt.Errorf(
+ "non-string key (pair #%d): %T",
+ i, keyValues[i*2])
+ }
+ switch typedVal := keyValues[i*2+1].(type) {
+ case bool:
+ fields[i] = Bool(key, typedVal)
+ case string:
+ fields[i] = String(key, typedVal)
+ case int:
+ fields[i] = Int(key, typedVal)
+ case int8:
+ fields[i] = Int32(key, int32(typedVal))
+ case int16:
+ fields[i] = Int32(key, int32(typedVal))
+ case int32:
+ fields[i] = Int32(key, typedVal)
+ case int64:
+ fields[i] = Int64(key, typedVal)
+ case uint:
+ fields[i] = Uint64(key, uint64(typedVal))
+ case uint64:
+ fields[i] = Uint64(key, typedVal)
+ case uint8:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint16:
+ fields[i] = Uint32(key, uint32(typedVal))
+ case uint32:
+ fields[i] = Uint32(key, typedVal)
+ case float32:
+ fields[i] = Float32(key, typedVal)
+ case float64:
+ fields[i] = Float64(key, typedVal)
+ default:
+ if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
+ fields[i] = String(key, "nil")
+ continue
+ }
+ // When in doubt, coerce to a string
+ fields[i] = String(key, fmt.Sprint(typedVal))
+ }
+ }
+ return fields, nil
+}
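+
+// As a usage sketch (the mySpan type is hypothetical), a tracer's
+// Span.LogKV() implementation can delegate to this helper:
+//
+//    func (s *mySpan) LogKV(keyValues ...interface{}) {
+//        fields, err := log.InterleavedKVToFields(keyValues...)
+//        if err != nil {
+//            return // or record the conversion error on the span
+//        }
+//        s.LogFields(fields...)
+//    }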
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 0000000..f9b680a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+ defaultNoopSpanContext SpanContext = noopSpanContext{}
+ defaultNoopSpan Span = noopSpan{}
+ defaultNoopTracer Tracer = NoopTracer{}
+)
+
+const (
+ emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span { return n }
+func (n noopSpan) BaggageItem(key string) string { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span { return n }
+func (n noopSpan) LogFields(fields ...log.Field) {}
+func (n noopSpan) LogKV(keyVals ...interface{}) {}
+func (n noopSpan) Finish() {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions) {}
+func (n noopSpan) SetOperationName(operationName string) Span { return n }
+func (n noopSpan) Tracer() Tracer { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string) {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData) {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+ return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+ return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+ return nil, ErrSpanContextNotFound
+}
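+
+// As an illustrative sketch (the helper below is hypothetical), a library
+// can fall back to the no-op implementation so its instrumentation never
+// has to nil-check the tracer:
+//
+//    func tracerOrNoop(t opentracing.Tracer) opentracing.Tracer {
+//        if t == nil {
+//            return opentracing.NoopTracer{}
+//        }
+//        return t
+//    }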
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 0000000..b0c275e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+ "errors"
+ "net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+ // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+ // Tracer.Extract() is not recognized by the Tracer implementation.
+ ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+ // ErrSpanContextNotFound occurs when the `carrier` passed to
+ // Tracer.Extract() is valid and uncorrupted but has insufficient
+ // information to extract a SpanContext.
+ ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+ // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+ // operate on a SpanContext which it is not prepared to handle (for
+ // example, since it was created by a different tracer implementation).
+ ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+ // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+ // implementations expect a different type of `carrier` than they are
+ // given.
+ ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+ // ErrSpanContextCorrupted occurs when the `carrier` passed to
+ // Tracer.Extract() is of the expected type but is corrupted.
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+ // Binary represents SpanContexts as opaque binary data.
+ //
+ // For Tracer.Inject(): the carrier must be an `io.Writer`.
+ //
+ // For Tracer.Extract(): the carrier must be an `io.Reader`.
+ Binary BuiltinFormat = iota
+
+ // TextMap represents SpanContexts as key:value string pairs.
+ //
+ // Unlike HTTPHeaders, the TextMap format does not restrict the key or
+ // value character sets in any way.
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ TextMap
+
+ // HTTPHeaders represents SpanContexts as HTTP header string pairs.
+ //
+ // Unlike TextMap, the HTTPHeaders format requires that the keys and values
+ // be valid as HTTP headers as-is (i.e., character casing may be unstable
+	// and special characters are disallowed in keys; values should be
+ // URL-escaped, etc).
+ //
+ // For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+ //
+ // For Tracer.Extract(): the carrier must be a `TextMapReader`.
+ //
+ // See HTTPHeadersCarrier for an implementation of both TextMapWriter
+ // and TextMapReader that defers to an http.Header instance for storage.
+ // For example, Inject():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := span.Tracer().Inject(
+ // span.Context(), opentracing.HTTPHeaders, carrier)
+ //
+ // Or Extract():
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(
+ // opentracing.HTTPHeaders, carrier)
+ //
+ HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+	// Set a key:value pair in the carrier. Multiple calls to Set() for the
+	// same key lead to undefined behavior.
+ //
+ // NOTE: The backing store for the TextMapWriter may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+ // ForeachKey returns TextMap contents via repeated calls to the `handler`
+ // function. If any call to `handler` returns a non-nil error, ForeachKey
+ // terminates and returns that error.
+ //
+ // NOTE: The backing store for the TextMapReader may contain data unrelated
+ // to SpanContext. As such, Inject() and Extract() implementations that
+ // call the TextMapWriter and TextMapReader interfaces must agree on a
+ // prefix or other convention to distinguish their own key:value pairs.
+ //
+ // The "foreach" callback pattern reduces unnecessary copying in some cases
+ // and also allows implementations to hold locks while the map is read.
+ ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, v := range c {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+ c[key] = val
+}
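+
+// For example (error handling elided), a plain map can carry a SpanContext
+// between Inject and Extract:
+//
+//    carrier := opentracing.TextMapCarrier{}
+//    _ = tracer.Inject(span.Context(), opentracing.TextMap, carrier)
+//
+//    // ...transport the map out of process, then on the receiving side:
+//    spanCtx, _ := tracer.Extract(opentracing.TextMap, carrier)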
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+// err := tracer.Inject(
+// span.Context(),
+// opentracing.HTTPHeaders,
+// carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+ h := http.Header(c)
+ h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+ for k, vals := range c {
+ for _, v := range vals {
+ if err := handler(k, v); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 0000000..0d3fb53
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+ // ForeachBaggageItem grants access to all baggage items stored in the
+ // SpanContext.
+ // The handler function will be called for each baggage key/value pair.
+ // The ordering of items is not guaranteed.
+ //
+ // The bool return value indicates if the handler wants to continue iterating
+ // through the rest of the baggage items; for example if the handler is trying to
+ // find some baggage item by pattern matching the name, it can return false
+ // as soon as the item is found to stop further iterations.
+ ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+ // Sets the end timestamp and finalizes Span state.
+ //
+ // With the exception of calls to Context() (which are always allowed),
+ // Finish() must be the last call made to any span instance, and to do
+ // otherwise leads to undefined behavior.
+ Finish()
+ // FinishWithOptions is like Finish() but with explicit control over
+ // timestamps and log data.
+ FinishWithOptions(opts FinishOptions)
+
+ // Context() yields the SpanContext for this Span. Note that the return
+ // value of Context() is still valid after a call to Span.Finish(), as is
+ // a call to Span.Context() after a call to Span.Finish().
+ Context() SpanContext
+
+ // Sets or changes the operation name.
+ //
+ // Returns a reference to this Span for chaining.
+ SetOperationName(operationName string) Span
+
+ // Adds a tag to the span.
+ //
+ // If there is a pre-existing tag set for `key`, it is overwritten.
+ //
+ // Tag values can be numeric types, strings, or bools. The behavior of
+ // other tag value types is undefined at the OpenTracing level. If a
+ // tracing system does not know how to handle a particular value type, it
+ // may ignore the tag, but shall not panic.
+ //
+ // Returns a reference to this Span for chaining.
+ SetTag(key string, value interface{}) Span
+
+ // LogFields is an efficient and type-checked way to record key:value
+ // logging data about a Span, though the programming interface is a little
+ // more verbose than LogKV(). Here's an example:
+ //
+ // span.LogFields(
+ // log.String("event", "soft error"),
+ // log.String("type", "cache timeout"),
+ // log.Int("waited.millis", 1500))
+ //
+ // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+ LogFields(fields ...log.Field)
+
+ // LogKV is a concise, readable way to record key:value logging data about
+ // a Span, though unfortunately this also makes it less efficient and less
+ // type-safe than LogFields(). Here's an example:
+ //
+ // span.LogKV(
+ // "event", "soft error",
+ // "type", "cache timeout",
+ // "waited.millis", 1500)
+ //
+ // For LogKV (as opposed to LogFields()), the parameters must appear as
+ // key-value pairs, like
+ //
+ // span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+ //
+ // The keys must all be strings. The values may be strings, numeric types,
+ // bools, Go error instances, or arbitrary structs.
+ //
+ // (Note to implementors: consider the log.InterleavedKVToFields() helper)
+ LogKV(alternatingKeyValues ...interface{})
+
+ // SetBaggageItem sets a key:value pair on this Span and its SpanContext
+ // that also propagates to descendants of this Span.
+ //
+ // SetBaggageItem() enables powerful functionality given a full-stack
+ // opentracing integration (e.g., arbitrary application data from a mobile
+ // app can make it, transparently, all the way into the depths of a storage
+ // system), and with it some powerful costs: use this feature with care.
+ //
+ // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+ // *future* causal descendants of the associated Span.
+ //
+ // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+ // value is copied into every local *and remote* child of the associated
+ // Span, and that can add up to a lot of network and cpu overhead.
+ //
+ // Returns a reference to this Span for chaining.
+ SetBaggageItem(restrictedKey, value string) Span
+
+ // Gets the value for a baggage item given its key. Returns the empty string
+ // if the value isn't found in this Span.
+ BaggageItem(restrictedKey string) string
+
+ // Provides access to the Tracer that created this Span.
+ Tracer() Tracer
+
+ // Deprecated: use LogFields or LogKV
+ LogEvent(event string)
+ // Deprecated: use LogFields or LogKV
+ LogEventWithPayload(event string, payload interface{})
+ // Deprecated: use LogFields or LogKV
+ Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+ Timestamp time.Time
+ Fields []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
+type FinishOptions struct {
+ // FinishTime overrides the Span's finish time, or implicitly becomes
+ // time.Now() if FinishTime.IsZero().
+ //
+ // FinishTime must resolve to a timestamp that's >= the Span's StartTime
+ // (per StartSpanOptions).
+ FinishTime time.Time
+
+ // LogRecords allows the caller to specify the contents of many LogFields()
+ // calls with a single slice. May be nil.
+ //
+ // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+ // be set explicitly). Also, they must be >= the Span's start timestamp and
+ // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+ // behavior of FinishWithOptions() is undefined.
+ //
+ // If specified, the caller hands off ownership of LogRecords at
+ // FinishWithOptions() invocation time.
+ //
+ // If specified, the (deprecated) BulkLogData must be nil or empty.
+ LogRecords []LogRecord
+
+ // BulkLogData is DEPRECATED.
+ BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+ Timestamp time.Time
+ Event string
+ Payload interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+ var literalTimestamp time.Time
+ if ld.Timestamp.IsZero() {
+ literalTimestamp = time.Now()
+ } else {
+ literalTimestamp = ld.Timestamp
+ }
+ rval := LogRecord{
+ Timestamp: literalTimestamp,
+ }
+ if ld.Payload == nil {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ }
+ } else {
+ rval.Fields = []log.Field{
+ log.String("event", ld.Event),
+ log.Object("payload", ld.Payload),
+ }
+ }
+ return rval
+}
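+
+// For example (illustrative; records is a caller-side slice), a tracer can
+// migrate deprecated bulk log data at Finish time:
+//
+//    for _, ld := range opts.BulkLogData {
+//        records = append(records, ld.ToLogRecord())
+//    }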
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 0000000..715f0ce
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,304 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+ // Create, start, and return a new Span with the given `operationName` and
+ // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+ // from the "functional options" pattern, per
+ // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+ //
+ // A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+ // opentracing.FollowsFrom()) becomes the root of its own trace.
+ //
+ // Examples:
+ //
+ // var tracer opentracing.Tracer = ...
+ //
+ // // The root-span case:
+ // sp := tracer.StartSpan("GetFeed")
+ //
+ // // The vanilla child span case:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()))
+ //
+ // // All the bells and whistles:
+ // sp := tracer.StartSpan(
+ // "GetFeed",
+ // opentracing.ChildOf(parentSpan.Context()),
+ // opentracing.Tag{"user_agent", loggedReq.UserAgent},
+ // opentracing.StartTime(loggedReq.Timestamp),
+ // )
+ //
+ StartSpan(operationName string, opts ...StartSpanOption) Span
+
+ // Inject() takes the `sm` SpanContext instance and injects it for
+ // propagation within `carrier`. The actual type of `carrier` depends on
+ // the value of `format`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see https://godoc.org/context#WithValue).
+ //
+ // Example usage (sans error handling):
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // err := tracer.Inject(
+ // span.Context(),
+ // opentracing.HTTPHeaders,
+ // carrier)
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Implementations may return opentracing.ErrUnsupportedFormat if `format`
+ // is not supported by (or not known by) the implementation.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if the format is supported but injection
+ // fails anyway.
+ //
+ // See Tracer.Extract().
+ Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+ // Extract() returns a SpanContext instance given `format` and `carrier`.
+ //
+ // OpenTracing defines a common set of `format` values (see BuiltinFormat),
+ // and each has an expected carrier type.
+ //
+ // Other packages may declare their own `format` values, much like the keys
+ // used by `context.Context` (see
+ // https://godoc.org/golang.org/x/net/context#WithValue).
+ //
+ // Example usage (with StartSpan):
+ //
+ //
+ // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+ // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+ //
+ // // ... assuming the ultimate goal here is to resume the trace with a
+ // // server-side Span:
+ // var serverSpan opentracing.Span
+ // if err == nil {
+ // span = tracer.StartSpan(
+ // rpcMethodName, ext.RPCServerOption(clientContext))
+ // } else {
+ // span = tracer.StartSpan(rpcMethodName)
+ // }
+ //
+ //
+ // NOTE: All opentracing.Tracer implementations MUST support all
+ // BuiltinFormats.
+ //
+ // Return values:
+ // - A successful Extract returns a SpanContext instance and a nil error
+ // - If there was simply no SpanContext to extract in `carrier`, Extract()
+ // returns (nil, opentracing.ErrSpanContextNotFound)
+ // - If `format` is unsupported or unrecognized, Extract() returns (nil,
+ // opentracing.ErrUnsupportedFormat)
+ // - If there are more fundamental problems with the `carrier` object,
+ // Extract() may return opentracing.ErrInvalidCarrier,
+ // opentracing.ErrSpanContextCorrupted, or implementation-specific
+ // errors.
+ //
+ // See Tracer.Inject().
+ Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions gives Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+// func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+// sso := opentracing.StartSpanOptions{}
+// for _, o := range opts {
+// o.Apply(&sso)
+// }
+// ...
+// }
+//
+type StartSpanOptions struct {
+ // Zero or more causal references to other Spans (via their SpanContext).
+ // If empty, start a "root" Span (i.e., start a new trace).
+ References []SpanReference
+
+ // StartTime overrides the Span's start time, or implicitly becomes
+ // time.Now() if StartTime.IsZero().
+ StartTime time.Time
+
+ // Tags may have zero or more entries; the restrictions on map values are
+ // identical to those for Span.SetTag(). May be nil.
+ //
+ // If specified, the caller hands off ownership of Tags at
+ // StartSpan() invocation time.
+ Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+ Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+ // ChildOfRef refers to a parent Span that caused *and* somehow depends
+ // upon the new child Span. Often (but not always), the parent Span cannot
+ // finish until the child Span does.
+ //
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+ //
+ // [-Parent Span---------]
+ // [-Child Span----]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.ChildOf()
+ ChildOfRef SpanReferenceType = iota
+
+ // FollowsFromRef refers to a parent Span that does not depend in any way
+ // on the result of the new child Span. For instance, one might use
+ // FollowsFromRefs to describe pipeline stages separated by queues,
+ // or a fire-and-forget cache insert at the tail end of a web request.
+ //
+ // A FollowsFromRef Span is part of the same logical trace as the new Span:
+ // i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+ //
+ // All of the following could be valid timing diagrams for children that
+ // "FollowFrom" a parent.
+ //
+ // [-Parent Span-] [-Child Span-]
+ //
+ //
+ // [-Parent Span--]
+ // [-Child Span-]
+ //
+ //
+ // [-Parent Span-]
+ // [-Child Span-]
+ //
+ // See http://opentracing.io/spec/
+ //
+ // See opentracing.FollowsFrom()
+ FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+// sc, _ := tracer.Extract(someFormat, someCarrier)
+// span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+ Type SpanReferenceType
+ ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+ if r.ReferencedContext != nil {
+ o.References = append(o.References, r)
+ }
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: ChildOfRef,
+ ReferencedContext: sc,
+ }
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+ return SpanReference{
+ Type: FollowsFromRef,
+ ReferencedContext: sc,
+ }
+}
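+
+// For example, a fire-and-forget cache write can be linked to the request
+// that caused it without implying that the request waits for it:
+//
+//    span := tracer.StartSpan(
+//        "cache.set",
+//        opentracing.FollowsFrom(parentSpan.Context()))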
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+ o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ for k, v := range t {
+ o.Tags[k] = v
+ }
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"Key", value})
+//
+// or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+ Key string
+ Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+ if o.Tags == nil {
+ o.Tags = make(map[string]interface{})
+ }
+ o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+ s.SetTag(t.Key, t.Value)
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..9159de0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 0000000..ce9d7cd
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..54dfdcb
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which, applied recursively up the call stack, results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..161aea2
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
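+
+// To illustrate the trade-off (a sketch, not upstream documentation):
+// Wrap records a new stack trace at its call site, whereas WithMessage only
+// prepends a message, so WithMessage is the cheaper choice when a stack has
+// already been captured further down the call chain:
+//
+//     if err := step(); err != nil {
+//         return errors.WithMessage(err, "step failed") // no new stack recorded
+//     }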
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 0000000..be0d10d
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
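+
+// Illustrative usage (ErrNotFound is a caller-defined sentinel):
+//
+//     var ErrNotFound = errors.New("not found")
+//
+//     if errors.Is(err, ErrNotFound) {
+//         // matches even if err was wrapped with Wrap/WithMessage,
+//         // because those wrapper types implement Unwrap.
+//     }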
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..779a834
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons, if Frame is interpreted as a uintptr,
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (<funcname>\n\t<path>)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE
new file mode 100644
index 0000000..c67dad6
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+ The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
new file mode 100644
index 0000000..003e99f
--- /dev/null
+++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go
@@ -0,0 +1,772 @@
+// Package difflib is a partial port of the Python difflib module.
+//
+// It provides tools to compare sequences of strings and generate textual diffs.
+//
+// The following class and functions have been ported:
+//
+// - SequenceMatcher
+//
+// - unified_diff
+//
+// - context_diff
+//
+// Getting unified diffs was the main goal of the port. Keep in mind this code
+// is mostly suitable for presenting text differences in a human-friendly way;
+// there are no guarantees that generated diffs are consumable by patch(1).
+package difflib
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+func calculateRatio(matches, length int) float64 {
+ if length > 0 {
+ return 2.0 * float64(matches) / float64(length)
+ }
+ return 1.0
+}
+
+type Match struct {
+ A int
+ B int
+ Size int
+}
+
+type OpCode struct {
+ Tag byte
+ I1 int
+ I2 int
+ J1 int
+ J2 int
+}
+
+// SequenceMatcher compares sequences of strings. The basic
+// algorithm predates, and is a little fancier than, an algorithm
+// published in the late 1980's by Ratcliff and Obershelp under the
+// hyperbolic name "gestalt pattern matching". The basic idea is to find
+// the longest contiguous matching subsequence that contains no "junk"
+// elements (R-O doesn't address junk). The same idea is then applied
+// recursively to the pieces of the sequences to the left and to the right
+// of the matching subsequence. This does not yield minimal edit
+// sequences, but does tend to yield matches that "look right" to people.
+//
+// SequenceMatcher tries to compute a "human-friendly diff" between two
+// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the
+// longest *contiguous* & junk-free matching subsequence. That's what
+// catches people's eyes. The Windows(tm) windiff has another interesting
+// notion, pairing up elements that appear uniquely in each sequence.
+// That, and the method here, appear to yield more intuitive difference
+// reports than does diff. This method appears to be the least vulnerable
+// to synching up on blocks of "junk lines", though (like blank lines in
+// ordinary text files, or maybe "<P>" lines in HTML files). That may be
+// because this is the only method of the 3 that has a *concept* of
+// "junk" <wink>.
+//
+// Timing: Basic R-O is cubic time worst case and quadratic time expected
+// case. SequenceMatcher is quadratic time for the worst case and has
+// expected-case behavior dependent in a complicated way on how many
+// elements the sequences have in common; best case time is linear.
+type SequenceMatcher struct {
+ a []string
+ b []string
+ b2j map[string][]int
+ IsJunk func(string) bool
+ autoJunk bool
+ bJunk map[string]struct{}
+ matchingBlocks []Match
+ fullBCount map[string]int
+ bPopular map[string]struct{}
+ opCodes []OpCode
+}
+
+func NewMatcher(a, b []string) *SequenceMatcher {
+ m := SequenceMatcher{autoJunk: true}
+ m.SetSeqs(a, b)
+ return &m
+}
+
+func NewMatcherWithJunk(a, b []string, autoJunk bool,
+ isJunk func(string) bool) *SequenceMatcher {
+
+ m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}
+ m.SetSeqs(a, b)
+ return &m
+}
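+
+// Example (illustrative): comparing two line slices and reading out the
+// similarity ratio and opcodes:
+//
+//    m := NewMatcher(
+//        []string{"one", "two", "three"},
+//        []string{"one", "too", "three"})
+//    fmt.Println(m.Ratio())      // 2*matches/total = 4/6 ~ 0.67
+//    fmt.Println(m.GetOpCodes()) // equal / replace / equal spans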
+
+// Set two sequences to be compared.
+func (m *SequenceMatcher) SetSeqs(a, b []string) {
+ m.SetSeq1(a)
+ m.SetSeq2(b)
+}
+
+// Set the first sequence to be compared. The second sequence to be compared is
+// not changed.
+//
+// SequenceMatcher computes and caches detailed information about the second
+// sequence, so if you want to compare one sequence S against many sequences,
+// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other
+// sequences.
+//
+// See also SetSeqs() and SetSeq2().
+func (m *SequenceMatcher) SetSeq1(a []string) {
+ if &a == &m.a {
+ return
+ }
+ m.a = a
+ m.matchingBlocks = nil
+ m.opCodes = nil
+}
+
+// Set the second sequence to be compared. The first sequence to be compared is
+// not changed.
+func (m *SequenceMatcher) SetSeq2(b []string) {
+ if &b == &m.b {
+ return
+ }
+ m.b = b
+ m.matchingBlocks = nil
+ m.opCodes = nil
+ m.fullBCount = nil
+ m.chainB()
+}
+
+func (m *SequenceMatcher) chainB() {
+ // Populate line -> index mapping
+ b2j := map[string][]int{}
+	for i, s := range m.b {
+		b2j[s] = append(b2j[s], i)
+	}
+
+ // Purge junk elements
+ m.bJunk = map[string]struct{}{}
+ if m.IsJunk != nil {
+ junk := m.bJunk
+		for s := range b2j {
+			if m.IsJunk(s) {
+				junk[s] = struct{}{}
+			}
+		}
+		for s := range junk {
+			delete(b2j, s)
+		}
+ }
+
+ // Purge remaining popular elements
+ popular := map[string]struct{}{}
+ n := len(m.b)
+ if m.autoJunk && n >= 200 {
+ ntest := n/100 + 1
+ for s, indices := range b2j {
+ if len(indices) > ntest {
+ popular[s] = struct{}{}
+ }
+ }
+		for s := range popular {
+ delete(b2j, s)
+ }
+ }
+ m.bPopular = popular
+ m.b2j = b2j
+}
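+
+// Editor's note: a worked example of the autoJunk heuristic above.
+// It only runs for sequences of at least 200 lines; with
+// n = len(b) = 300, ntest = 300/100 + 1 = 4, so any line occurring
+// more than 4 times in b (blank lines, lone braces, ...) is dropped
+// from b2j and thereafter ignored when matching.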
+
+func (m *SequenceMatcher) isBJunk(s string) bool {
+ _, ok := m.bJunk[s]
+ return ok
+}
+
+// findLongestMatch finds the longest matching block in a[alo:ahi]
+// and b[blo:bhi].
+//
+// If IsJunk is not defined:
+//
+// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
+// alo <= i <= i+k <= ahi
+// blo <= j <= j+k <= bhi
+// and for all (i',j',k') meeting those conditions,
+// k >= k'
+// i <= i'
+// and if i == i', j <= j'
+//
+// In other words, of all maximal matching blocks, return one that
+// starts earliest in a, and of all those maximal matching blocks that
+// start earliest in a, return the one that starts earliest in b.
+//
+// If IsJunk is defined, first the longest matching block is
+// determined as above, but with the additional restriction that no
+// junk element appears in the block. Then that block is extended as
+// far as possible by matching (only) junk elements on both sides. So
+// the resulting block never matches on junk except as identical junk
+// happens to be adjacent to an "interesting" match.
+//
+// If no blocks match, return (alo, blo, 0).
+func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {
+ // CAUTION: stripping common prefix or suffix would be incorrect.
+ // E.g.,
+ // ab
+ // acab
+ // Longest matching block is "ab", but if common prefix is
+ // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so
+ // strip, so ends up claiming that ab is changed to acab by
+ // inserting "ca" in the middle. That's minimal but unintuitive:
+ // "it's obvious" that someone inserted "ac" at the front.
+ // Windiff ends up at the same place as diff, but by pairing up
+ // the unique 'b's and then matching the first two 'a's.
+ besti, bestj, bestsize := alo, blo, 0
+
+ // find longest junk-free match
+ // during an iteration of the loop, j2len[j] = length of longest
+ // junk-free match ending with a[i-1] and b[j]
+ j2len := map[int]int{}
+ for i := alo; i != ahi; i++ {
+ // look at all instances of a[i] in b; note that because
+ // b2j has no junk keys, the loop is skipped if a[i] is junk
+ newj2len := map[int]int{}
+ for _, j := range m.b2j[m.a[i]] {
+ // a[i] matches b[j]
+ if j < blo {
+ continue
+ }
+ if j >= bhi {
+ break
+ }
+ k := j2len[j-1] + 1
+ newj2len[j] = k
+ if k > bestsize {
+ besti, bestj, bestsize = i-k+1, j-k+1, k
+ }
+ }
+ j2len = newj2len
+ }
+
+ // Extend the best by non-junk elements on each end. In particular,
+ // "popular" non-junk elements aren't in b2j, which greatly speeds
+ // the inner loop above, but also means "the best" match so far
+ // doesn't contain any junk *or* popular non-junk elements.
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ !m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+ }
+
+ // Now that we have a wholly interesting match (albeit possibly
+ // empty!), we may as well suck up the matching junk on each
+ // side of it too. Can't think of a good reason not to, and it
+ // saves post-processing the (possibly considerable) expense of
+ // figuring out what to do with it. In the case of an empty
+ // interesting match, this is clearly the right thing to do,
+ // because no other kind of match is possible in the regions.
+ for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&
+ m.a[besti-1] == m.b[bestj-1] {
+ besti, bestj, bestsize = besti-1, bestj-1, bestsize+1
+ }
+ for besti+bestsize < ahi && bestj+bestsize < bhi &&
+ m.isBJunk(m.b[bestj+bestsize]) &&
+ m.a[besti+bestsize] == m.b[bestj+bestsize] {
+		bestsize++
+ }
+
+ return Match{A: besti, B: bestj, Size: bestsize}
+}
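+
+// Editor's illustration (derived by tracing the code above) of the
+// comment's "ab" vs "acab" example:
+//
+//	a := []string{"a", "b"}
+//	b := []string{"a", "c", "a", "b"}
+//	m := NewMatcher(a, b)
+//	m.findLongestMatch(0, 2, 0, 4) // == Match{A: 0, B: 2, Size: 2}
+//
+// i.e. the whole of a matches b[2:4], rather than the fragmented
+// prefix/suffix matches a minimal-edit diff would report.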
+
+// GetMatchingBlocks returns a list of triples describing matching
+// subsequences.
+//
+// Each triple is of the form (i, j, n), and means that
+// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in
+// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are
+// adjacent triples in the list, and the second is not the last triple in the
+// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe
+// adjacent equal blocks.
+//
+// The last triple is a dummy, (len(a), len(b), 0), and is the only
+// triple with n==0.
+func (m *SequenceMatcher) GetMatchingBlocks() []Match {
+ if m.matchingBlocks != nil {
+ return m.matchingBlocks
+ }
+
+ var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match
+ matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {
+ match := m.findLongestMatch(alo, ahi, blo, bhi)
+ i, j, k := match.A, match.B, match.Size
+ if match.Size > 0 {
+ if alo < i && blo < j {
+ matched = matchBlocks(alo, i, blo, j, matched)
+ }
+ matched = append(matched, match)
+ if i+k < ahi && j+k < bhi {
+ matched = matchBlocks(i+k, ahi, j+k, bhi, matched)
+ }
+ }
+ return matched
+ }
+ matched := matchBlocks(0, len(m.a), 0, len(m.b), nil)
+
+ // It's possible that we have adjacent equal blocks in the
+ // matching_blocks list now.
+ nonAdjacent := []Match{}
+ i1, j1, k1 := 0, 0, 0
+ for _, b := range matched {
+ // Is this block adjacent to i1, j1, k1?
+ i2, j2, k2 := b.A, b.B, b.Size
+ if i1+k1 == i2 && j1+k1 == j2 {
+ // Yes, so collapse them -- this just increases the length of
+ // the first block by the length of the second, and the first
+ // block so lengthened remains the block to compare against.
+ k1 += k2
+ } else {
+ // Not adjacent. Remember the first block (k1==0 means it's
+ // the dummy we started with), and make the second block the
+ // new block to compare against.
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+ i1, j1, k1 = i2, j2, k2
+ }
+ }
+ if k1 > 0 {
+ nonAdjacent = append(nonAdjacent, Match{i1, j1, k1})
+ }
+
+ nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})
+ m.matchingBlocks = nonAdjacent
+ return m.matchingBlocks
+}
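+
+// Editor's sketch (hypothetical, not upstream code): what
+// GetMatchingBlocks yields for a one-line change, including the
+// trailing (len(a), len(b), 0) sentinel described above.
+func exampleGetMatchingBlocks() {
+	m := NewMatcher(
+		[]string{"x", "same", "y"},
+		[]string{"z", "same", "y"},
+	)
+	for _, blk := range m.GetMatchingBlocks() {
+		fmt.Printf("a[%d:%d] == b[%d:%d]\n",
+			blk.A, blk.A+blk.Size, blk.B, blk.B+blk.Size)
+	}
+	// Expected: a[1:3] == b[1:3], then the a[3:3] == b[3:3] sentinel.
+}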
+
+// GetOpCodes returns a list of 5-tuples describing how to turn a
+// into b.
+//
+// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple
+// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the
+// tuple preceding it, and likewise for j1 == the previous j2.
+//
+// The tags are characters, with these meanings:
+//
+// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2]
+//
+// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case.
+//
+// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.
+//
+// 'e' (equal): a[i1:i2] == b[j1:j2]
+func (m *SequenceMatcher) GetOpCodes() []OpCode {
+ if m.opCodes != nil {
+ return m.opCodes
+ }
+ i, j := 0, 0
+ matching := m.GetMatchingBlocks()
+ opCodes := make([]OpCode, 0, len(matching))
+	for _, blk := range matching {
+ // invariant: we've pumped out correct diffs to change
+ // a[:i] into b[:j], and the next matching block is
+ // a[ai:ai+size] == b[bj:bj+size]. So we need to pump
+ // out a diff to change a[i:ai] into b[j:bj], pump out
+ // the matching block, and move (i,j) beyond the match
+		ai, bj, size := blk.A, blk.B, blk.Size
+ tag := byte(0)
+ if i < ai && j < bj {
+ tag = 'r'
+ } else if i < ai {
+ tag = 'd'
+ } else if j < bj {
+ tag = 'i'
+ }
+ if tag > 0 {
+ opCodes = append(opCodes, OpCode{tag, i, ai, j, bj})
+ }
+ i, j = ai+size, bj+size
+ // the list of matching blocks is terminated by a
+ // sentinel with size 0
+ if size > 0 {
+ opCodes = append(opCodes, OpCode{'e', ai, i, bj, j})
+ }
+ }
+ m.opCodes = opCodes
+ return m.opCodes
+}
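+
+// Editor's sketch (hypothetical, not upstream code): the opcodes for
+// replacing one line and keeping two, matching the tag legend above.
+func exampleGetOpCodes() {
+	m := NewMatcher(
+		[]string{"x", "same", "y"},
+		[]string{"z", "same", "y"},
+	)
+	for _, op := range m.GetOpCodes() {
+		fmt.Printf("%c a[%d:%d] b[%d:%d]\n", op.Tag, op.I1, op.I2, op.J1, op.J2)
+	}
+	// Expected: r a[0:1] b[0:1], then e a[1:3] b[1:3].
+}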
+
+// GetGroupedOpCodes isolates change clusters by eliminating ranges
+// with no changes.
+//
+// It returns a slice of groups with up to n lines of context each;
+// a negative n falls back to three. Each group is in the same format
+// as returned by GetOpCodes().
+func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
+ if n < 0 {
+ n = 3
+ }
+ codes := m.GetOpCodes()
+ if len(codes) == 0 {
+		codes = []OpCode{{'e', 0, 1, 0, 1}}
+ }
+ // Fixup leading and trailing groups if they show no changes.
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
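+
+// Editor's note: with the default n == 3, a 100-line file with a
+// single changed line in the middle comes back as one group of three
+// opcodes: a leading 'e' trimmed to 3 context lines, the change
+// itself, and a trailing 'e' trimmed to 3 context lines.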
+
+// Ratio returns a measure of the sequences' similarity as a float in
+// the range [0,1].
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+	for _, blk := range m.GetMatchingBlocks() {
+		matches += blk.Size
+	}
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// QuickRatio returns an upper bound on Ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute.
+func (m *SequenceMatcher) QuickRatio() float64 {
+ // viewing a and b as multisets, set matches to the cardinality
+ // of their intersection; this counts the number of matches
+ // without regard to order, so is clearly an upper bound
+ if m.fullBCount == nil {
+ m.fullBCount = map[string]int{}
+ for _, s := range m.b {
+			m.fullBCount[s]++
+ }
+ }
+
+ // avail[x] is the number of times x appears in 'b' less the
+ // number of times we've seen it in 'a' so far ... kinda
+ avail := map[string]int{}
+ matches := 0
+ for _, s := range m.a {
+ n, ok := avail[s]
+ if !ok {
+ n = m.fullBCount[s]
+ }
+ avail[s] = n - 1
+ if n > 0 {
+			matches++
+ }
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// RealQuickRatio returns an upper bound on Ratio() very quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute than either .Ratio() or .QuickRatio().
+func (m *SequenceMatcher) RealQuickRatio() float64 {
+ la, lb := len(m.a), len(m.b)
+ return calculateRatio(min(la, lb), la+lb)
+}
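+
+// Editor's sketch (hypothetical, not upstream code) of the documented
+// ordering RealQuickRatio() >= QuickRatio() >= Ratio() on a small input.
+func exampleRatios() {
+	m := NewMatcher(
+		SplitLines("a\nb\nc\n"),
+		SplitLines("a\nx\nc\n"),
+	)
+	fmt.Printf("%.3f >= %.3f >= %.3f\n",
+		m.RealQuickRatio(), m.QuickRatio(), m.Ratio())
+	// Prints "1.000 >= 0.750 >= 0.750" for this input.
+}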
+
+// Convert range to the "ed" format
+func formatRangeUnified(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ return fmt.Sprintf("%d,%d", beginning, length)
+}
+
+// UnifiedDiff holds the inputs and formatting parameters for a
+// unified diff.
+type UnifiedDiff struct {
+ A []string // First sequence lines
+ FromFile string // First file name
+ FromDate string // First file time
+ B []string // Second sequence lines
+ ToFile string // Second file name
+ ToDate string // Second file time
+ Eol string // Headers end of line, defaults to LF
+ Context int // Number of context lines
+}
+
+// WriteUnifiedDiff compares two sequences of lines and writes the
+// delta as a unified diff.
+//
+// Unified diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by
+// diff.Context (negative values fall back to three).
+//
+// By default, the diff control lines (those with ---, +++, or @@) are
+// created with a trailing newline. This is helpful so that inputs
+// created with SplitLines result in diffs that can be written back
+// out unchanged, since both the inputs and outputs keep their
+// trailing newlines.
+//
+// diff.Eol sets the line terminator for the control lines; when it is
+// empty, it defaults to "\n".
+//
+// The unidiff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate and
+// diff.ToDate. The modification times are normally expressed in the
+// ISO 8601 format.
+func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ wf := func(format string, args ...interface{}) error {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ return err
+ }
+ ws := func(s string) error {
+ _, err := buf.WriteString(s)
+ return err
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ first, last := g[0], g[len(g)-1]
+ range1 := formatRangeUnified(first.I1, last.I2)
+ range2 := formatRangeUnified(first.J1, last.J2)
+ if err := wf("@@ -%s +%s @@%s", range1, range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// GetUnifiedDiffString is like WriteUnifiedDiff but returns the diff
+// as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+	return w.String(), err
+}
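+
+// Editor's sketch (hypothetical, not upstream code): producing a
+// unified diff with three lines of context, as described above
+// WriteUnifiedDiff.
+func exampleUnifiedDiff() {
+	diff := UnifiedDiff{
+		A:        SplitLines("one\ntwo\nthree\n"),
+		B:        SplitLines("one\n2\nthree\n"),
+		FromFile: "a.txt",
+		ToFile:   "b.txt",
+		Context:  3,
+	}
+	text, err := GetUnifiedDiffString(diff)
+	if err == nil {
+		fmt.Print(text)
+	}
+}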
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+// ContextDiff holds the same parameters as UnifiedDiff; it selects
+// the context-diff output format instead.
+type ContextDiff UnifiedDiff
+
+// WriteContextDiff compares two sequences of lines and writes the
+// delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by
+// diff.Context (negative values fall back to three).
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// diff.Eol sets the line terminator for the control lines; when it is
+// empty, it defaults to "\n".
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate and
+// diff.ToDate. The modification times are normally expressed in the
+// ISO 8601 format. If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': " ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// GetContextDiffString is like WriteContextDiff but returns the diff
+// as a string.
+func GetContextDiffString(diff ContextDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteContextDiff(w, diff)
+	return w.String(), err
+}
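+
+// Editor's sketch (hypothetical, not upstream code): the context-diff
+// twin of the unified example above; only the struct type and the
+// writer function differ.
+func exampleContextDiff() {
+	diff := ContextDiff{
+		A:        SplitLines("one\ntwo\nthree\n"),
+		B:        SplitLines("one\n2\nthree\n"),
+		FromFile: "a.txt",
+		ToFile:   "b.txt",
+		Context:  3,
+	}
+	text, err := GetContextDiffString(diff)
+	if err == nil {
+		fmt.Print(text)
+	}
+}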
+
+// SplitLines splits a string on "\n" while preserving the newlines.
+// The output can be used as input for the UnifiedDiff and ContextDiff
+// structures.
+func SplitLines(s string) []string {
+ lines := strings.SplitAfter(s, "\n")
+ lines[len(lines)-1] += "\n"
+ return lines
+}
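+
+// Editor's note: SplitLines("a\nb") returns {"a\n", "b\n"}; every
+// element, including the last, carries a trailing newline, which is
+// what keeps the "-"/"+"/" " prefixed output of the writers above
+// line-aligned.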
diff --git a/vendor/github.com/stretchr/testify/LICENSE b/vendor/github.com/stretchr/testify/LICENSE
new file mode 100644
index 0000000..4b0421c
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_compare.go b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
new file mode 100644
index 0000000..41649d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_compare.go
@@ -0,0 +1,394 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// CompareType represents the result of comparing two values:
+// compareLess, compareEqual or compareGreater.
+type CompareType int
+
+const (
+ compareLess CompareType = iota - 1
+ compareEqual
+ compareGreater
+)
+
+var (
+ intType = reflect.TypeOf(int(1))
+ int8Type = reflect.TypeOf(int8(1))
+ int16Type = reflect.TypeOf(int16(1))
+ int32Type = reflect.TypeOf(int32(1))
+ int64Type = reflect.TypeOf(int64(1))
+
+ uintType = reflect.TypeOf(uint(1))
+ uint8Type = reflect.TypeOf(uint8(1))
+ uint16Type = reflect.TypeOf(uint16(1))
+ uint32Type = reflect.TypeOf(uint32(1))
+ uint64Type = reflect.TypeOf(uint64(1))
+
+ float32Type = reflect.TypeOf(float32(1))
+ float64Type = reflect.TypeOf(float64(1))
+
+ stringType = reflect.TypeOf("")
+)
+
+func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
+ obj1Value := reflect.ValueOf(obj1)
+ obj2Value := reflect.ValueOf(obj2)
+
+ // throughout this switch we try and avoid calling .Convert() if possible,
+ // as this has a pretty big performance impact
+ switch kind {
+ case reflect.Int:
+ {
+ intobj1, ok := obj1.(int)
+ if !ok {
+ intobj1 = obj1Value.Convert(intType).Interface().(int)
+ }
+ intobj2, ok := obj2.(int)
+ if !ok {
+ intobj2 = obj2Value.Convert(intType).Interface().(int)
+ }
+ if intobj1 > intobj2 {
+ return compareGreater, true
+ }
+ if intobj1 == intobj2 {
+ return compareEqual, true
+ }
+ if intobj1 < intobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int8:
+ {
+ int8obj1, ok := obj1.(int8)
+ if !ok {
+ int8obj1 = obj1Value.Convert(int8Type).Interface().(int8)
+ }
+ int8obj2, ok := obj2.(int8)
+ if !ok {
+ int8obj2 = obj2Value.Convert(int8Type).Interface().(int8)
+ }
+ if int8obj1 > int8obj2 {
+ return compareGreater, true
+ }
+ if int8obj1 == int8obj2 {
+ return compareEqual, true
+ }
+ if int8obj1 < int8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int16:
+ {
+ int16obj1, ok := obj1.(int16)
+ if !ok {
+ int16obj1 = obj1Value.Convert(int16Type).Interface().(int16)
+ }
+ int16obj2, ok := obj2.(int16)
+ if !ok {
+ int16obj2 = obj2Value.Convert(int16Type).Interface().(int16)
+ }
+ if int16obj1 > int16obj2 {
+ return compareGreater, true
+ }
+ if int16obj1 == int16obj2 {
+ return compareEqual, true
+ }
+ if int16obj1 < int16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int32:
+ {
+ int32obj1, ok := obj1.(int32)
+ if !ok {
+ int32obj1 = obj1Value.Convert(int32Type).Interface().(int32)
+ }
+ int32obj2, ok := obj2.(int32)
+ if !ok {
+ int32obj2 = obj2Value.Convert(int32Type).Interface().(int32)
+ }
+ if int32obj1 > int32obj2 {
+ return compareGreater, true
+ }
+ if int32obj1 == int32obj2 {
+ return compareEqual, true
+ }
+ if int32obj1 < int32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Int64:
+ {
+ int64obj1, ok := obj1.(int64)
+ if !ok {
+ int64obj1 = obj1Value.Convert(int64Type).Interface().(int64)
+ }
+ int64obj2, ok := obj2.(int64)
+ if !ok {
+ int64obj2 = obj2Value.Convert(int64Type).Interface().(int64)
+ }
+ if int64obj1 > int64obj2 {
+ return compareGreater, true
+ }
+ if int64obj1 == int64obj2 {
+ return compareEqual, true
+ }
+ if int64obj1 < int64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint:
+ {
+ uintobj1, ok := obj1.(uint)
+ if !ok {
+ uintobj1 = obj1Value.Convert(uintType).Interface().(uint)
+ }
+ uintobj2, ok := obj2.(uint)
+ if !ok {
+ uintobj2 = obj2Value.Convert(uintType).Interface().(uint)
+ }
+ if uintobj1 > uintobj2 {
+ return compareGreater, true
+ }
+ if uintobj1 == uintobj2 {
+ return compareEqual, true
+ }
+ if uintobj1 < uintobj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint8:
+ {
+ uint8obj1, ok := obj1.(uint8)
+ if !ok {
+ uint8obj1 = obj1Value.Convert(uint8Type).Interface().(uint8)
+ }
+ uint8obj2, ok := obj2.(uint8)
+ if !ok {
+ uint8obj2 = obj2Value.Convert(uint8Type).Interface().(uint8)
+ }
+ if uint8obj1 > uint8obj2 {
+ return compareGreater, true
+ }
+ if uint8obj1 == uint8obj2 {
+ return compareEqual, true
+ }
+ if uint8obj1 < uint8obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint16:
+ {
+ uint16obj1, ok := obj1.(uint16)
+ if !ok {
+ uint16obj1 = obj1Value.Convert(uint16Type).Interface().(uint16)
+ }
+ uint16obj2, ok := obj2.(uint16)
+ if !ok {
+ uint16obj2 = obj2Value.Convert(uint16Type).Interface().(uint16)
+ }
+ if uint16obj1 > uint16obj2 {
+ return compareGreater, true
+ }
+ if uint16obj1 == uint16obj2 {
+ return compareEqual, true
+ }
+ if uint16obj1 < uint16obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint32:
+ {
+ uint32obj1, ok := obj1.(uint32)
+ if !ok {
+ uint32obj1 = obj1Value.Convert(uint32Type).Interface().(uint32)
+ }
+ uint32obj2, ok := obj2.(uint32)
+ if !ok {
+ uint32obj2 = obj2Value.Convert(uint32Type).Interface().(uint32)
+ }
+ if uint32obj1 > uint32obj2 {
+ return compareGreater, true
+ }
+ if uint32obj1 == uint32obj2 {
+ return compareEqual, true
+ }
+ if uint32obj1 < uint32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Uint64:
+ {
+ uint64obj1, ok := obj1.(uint64)
+ if !ok {
+ uint64obj1 = obj1Value.Convert(uint64Type).Interface().(uint64)
+ }
+ uint64obj2, ok := obj2.(uint64)
+ if !ok {
+ uint64obj2 = obj2Value.Convert(uint64Type).Interface().(uint64)
+ }
+ if uint64obj1 > uint64obj2 {
+ return compareGreater, true
+ }
+ if uint64obj1 == uint64obj2 {
+ return compareEqual, true
+ }
+ if uint64obj1 < uint64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float32:
+ {
+ float32obj1, ok := obj1.(float32)
+ if !ok {
+ float32obj1 = obj1Value.Convert(float32Type).Interface().(float32)
+ }
+ float32obj2, ok := obj2.(float32)
+ if !ok {
+ float32obj2 = obj2Value.Convert(float32Type).Interface().(float32)
+ }
+ if float32obj1 > float32obj2 {
+ return compareGreater, true
+ }
+ if float32obj1 == float32obj2 {
+ return compareEqual, true
+ }
+ if float32obj1 < float32obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.Float64:
+ {
+ float64obj1, ok := obj1.(float64)
+ if !ok {
+ float64obj1 = obj1Value.Convert(float64Type).Interface().(float64)
+ }
+ float64obj2, ok := obj2.(float64)
+ if !ok {
+ float64obj2 = obj2Value.Convert(float64Type).Interface().(float64)
+ }
+ if float64obj1 > float64obj2 {
+ return compareGreater, true
+ }
+ if float64obj1 == float64obj2 {
+ return compareEqual, true
+ }
+ if float64obj1 < float64obj2 {
+ return compareLess, true
+ }
+ }
+ case reflect.String:
+ {
+ stringobj1, ok := obj1.(string)
+ if !ok {
+ stringobj1 = obj1Value.Convert(stringType).Interface().(string)
+ }
+ stringobj2, ok := obj2.(string)
+ if !ok {
+ stringobj2 = obj2Value.Convert(stringType).Interface().(string)
+ }
+ if stringobj1 > stringobj2 {
+ return compareGreater, true
+ }
+ if stringobj1 == stringobj2 {
+ return compareEqual, true
+ }
+ if stringobj1 < stringobj2 {
+ return compareLess, true
+ }
+ }
+ }
+
+ return compareEqual, false
+}
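+
+// Editor's sketch (hypothetical, not upstream code): compare() is
+// driven by a value's reflect.Kind, so named types with an ordered
+// underlying kind still work via the Convert fallback above.
+func exampleCompare() {
+	type priority int // named type with underlying kind reflect.Int
+	res, ok := compare(priority(2), priority(1), reflect.Int)
+	fmt.Println(res == compareGreater, ok) // true true
+}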
+
+// Greater asserts that the first element is greater than the second
+//
+// assert.Greater(t, 2, 1)
+// assert.Greater(t, float64(2), float64(1))
+// assert.Greater(t, "b", "a")
+func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqual(t, 2, 1)
+// assert.GreaterOrEqual(t, 2, 2)
+// assert.GreaterOrEqual(t, "b", "a")
+// assert.GreaterOrEqual(t, "b", "b")
+func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+}
+
+// Less asserts that the first element is less than the second
+//
+// assert.Less(t, 1, 2)
+// assert.Less(t, float64(1), float64(2))
+// assert.Less(t, "a", "b")
+func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqual(t, 1, 2)
+// assert.LessOrEqual(t, 2, 2)
+// assert.LessOrEqual(t, "a", "b")
+// assert.LessOrEqual(t, "b", "b")
+func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+}
+
+// Positive asserts that the specified element is positive
+//
+// assert.Positive(t, 1)
+// assert.Positive(t, 1.23)
+func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+}
+
+// Negative asserts that the specified element is negative
+//
+// assert.Negative(t, -1)
+// assert.Negative(t, -1.23)
+func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+ zero := reflect.Zero(reflect.TypeOf(e))
+ return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+}
+
+func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ e1Kind := reflect.ValueOf(e1).Kind()
+ e2Kind := reflect.ValueOf(e2).Kind()
+ if e1Kind != e2Kind {
+ return Fail(t, "Elements should be the same type", msgAndArgs...)
+ }
+
+ compareResult, isComparable := compare(e1, e2, e1Kind)
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...)
+ }
+
+ return true
+}
+
+func containsValue(values []CompareType, value CompareType) bool {
+ for _, v := range values {
+ if v == value {
+ return true
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..4dfd122
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,741 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
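+
+// Editor's note (added commentary, not generator output): every *f
+// wrapper below simply prepends its format string to the variadic
+// args and forwards to the base assertion, so
+//
+//	assert.Equalf(t, want, got, "step %d", i)
+//
+// reports exactly like
+//
+//	assert.Equal(t, want, got, "step %d", i)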
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func DirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatchf(t, [1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false,
+// 0, or a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// Eventuallyf asserts that the given condition will be met in waitFor time,
+// periodically checking target function each tick.
+//
+// assert.Eventuallyf(t, func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Eventuallyf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// assert.Greaterf(t, 2, 1, "error message %s", "formatted")
+// assert.Greaterf(t, float64(2), float64(1), "error message %s", "formatted")
+// assert.Greaterf(t, "b", "a", "error message %s", "formatted")
+func Greaterf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// assert.GreaterOrEqualf(t, 2, 1, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "a", "error message %s", "formatted")
+// assert.GreaterOrEqualf(t, "b", "b", "error message %s", "formatted")
+func GreaterOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(t, handler, method, url, values, str, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+//  assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+//  assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCodef(t, myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCodef(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(t, handler, method, url, values, statuscode, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(t, handler, method, url, values, append([]interface{}{msg}, args...)...)
+}
+
+// Implementsf asserts that the object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValuesf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// assert.IsDecreasingf(t, []int{2, 1, 0}, "error message %s", "formatted")
+//    assert.IsDecreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsDecreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// assert.IsIncreasingf(t, []int{1, 2, 3}, "error message %s", "formatted")
+//    assert.IsIncreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsIncreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// assert.IsNonDecreasingf(t, []int{1, 1, 2}, "error message %s", "formatted")
+//    assert.IsNonDecreasingf(t, []float64{1, 2}, "error message %s", "formatted")
+// assert.IsNonDecreasingf(t, []string{"a", "b"}, "error message %s", "formatted")
+func IsNonDecreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// assert.IsNonIncreasingf(t, []int{2, 1, 1}, "error message %s", "formatted")
+//    assert.IsNonIncreasingf(t, []float64{2, 1}, "error message %s", "formatted")
+// assert.IsNonIncreasingf(t, []string{"b", "a"}, "error message %s", "formatted")
+func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has a specific length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// assert.Lessf(t, 1, 2, "error message %s", "formatted")
+// assert.Lessf(t, float64(1), float64(2), "error message %s", "formatted")
+// assert.Lessf(t, "a", "b", "error message %s", "formatted")
+func Lessf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// assert.LessOrEqualf(t, 1, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, 2, 2, "error message %s", "formatted")
+// assert.LessOrEqualf(t, "a", "b", "error message %s", "formatted")
+// assert.LessOrEqualf(t, "b", "b", "error message %s", "formatted")
+func LessOrEqualf(t TestingT, e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(t, e1, e2, append([]interface{}{msg}, args...)...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// assert.Negativef(t, -1, "error message %s", "formatted")
+// assert.Negativef(t, -1.23, "error message %s", "formatted")
+func Negativef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Neverf asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Neverf(t, func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func Neverf(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(t, condition, waitFor, tick, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoDirExistsf checks whether a directory does not exist in the given path.
+// It fails only if the path points to an existing directory.
+func NoDirExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NoFileExistsf checks whether a file does not exist in a given path. It fails
+// only if the path points to an existing file.
+func NoFileExistsf(t TestingT, path string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(t, path, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil,
+// "", false, 0, or a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValuesf(t, obj1, obj2, "error message %s", "formatted")
+func NotEqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// assert.NotSamef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT
+// contain all elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted")
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithErrorf(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithErrorf(t TestingT, errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(t, errString, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// assert.Positivef(t, 1, "error message %s", "formatted")
+// assert.Positivef(t, 1.23, "error message %s", "formatted")
+func Positivef(t TestingT, e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(t, e, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// assert.Samef(t, ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Samef(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
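+//
+// For example, two flow mappings that differ only in key order are equivalent:
+//
+// assert.YAMLEqf(t, `{a: 1, b: 2}`, `{b: 2, a: 1}`, "error message %s", "formatted")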
+func YAMLEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type.
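+//
+// The zero value is 0 for numeric types, "" for strings, and nil for pointers.
+// For example, with an illustrative struct field:
+//
+// assert.Zerof(t, cfg.Offset, "error message %s", "formatted")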
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..d2bb0b8
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ if h, ok := t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
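+{{/*
+  Illustrative expansion: for DocInfo.Name "True", the template above generates
+  a wrapper of the shape
+      func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+          if h, ok := t.(tHelper); ok { h.Helper() }
+          return True(t, value, append([]interface{}{msg}, args...)...)
+      }
+  matching the generated assertion_format.go above.
+*/}}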
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
new file mode 100644
index 0000000..25337a6
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -0,0 +1,1470 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Condition uses a Comparison to assert a complex condition.
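+//
+// For example (illustrative; a Comparison is simply a func() bool):
+//
+// a.Condition(func() bool { return retries < 3 }, "too many retries")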
+func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Condition(a.t, comp, msgAndArgs...)
+}
+
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Conditionf(a.t, comp, msg, args...)
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
+func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Contains(a.t, s, contains, msgAndArgs...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
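+//
+// For example, with an illustrative path:
+//
+// a.DirExists("testdata")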
+func (a *Assertions) DirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExists(a.t, path, msgAndArgs...)
+}
+
+// DirExistsf checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory or there is an error checking whether it exists.
+func (a *Assertions) DirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return DirExistsf(a.t, path, msg, args...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatch([1, 3, 2, 3], [1, 3, 3, 2])
+func (a *Assertions) ElementsMatch(listA interface{}, listB interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatch(a.t, listA, listB, msgAndArgs...)
+}
+
+// ElementsMatchf asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// a.ElementsMatchf([1, 3, 2, 3], [1, 3, 3, 2], "error message %s", "formatted")
+func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ElementsMatchf(a.t, listA, listB, msg, args...)
+}
+
+// Empty asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Empty(obj)
+func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Empty(a.t, object, msgAndArgs...)
+}
+
+// Emptyf asserts that the specified object is empty, i.e. nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Emptyf(a.t, object, msg, args...)
+}
+
+// Equal asserts that two objects are equal.
+//
+// a.Equal(123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equal(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualError(err, expectedErrorString)
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualError(a.t, theError, errString, msgAndArgs...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValues(uint32(123), int32(123))
+func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
+// }
+func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Error(a.t, err, msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
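+//
+// For example, with a standard-library error type:
+//
+// var pathErr *os.PathError
+// a.ErrorAs(err, &pathErr)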
+func (a *Assertions) ErrorAs(err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorAsf asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorAsf(a.t, err, target, msg, args...)
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// ErrorIsf asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return ErrorIsf(a.t, err, target, msg, args...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Errorf(a.t, err, msg, args...)
+}
+
+// Eventually asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventually(func() bool { return true; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Eventually(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventually(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Eventuallyf asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Eventuallyf(func() bool { return true; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Eventuallyf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Eventuallyf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// a.Exactly(int32(123), int64(123))
+func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactly(a.t, expected, actual, msgAndArgs...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
+// Fail reports a failure through the wrapped TestingT.
+func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNow fails the test immediately.
+func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNow(a.t, failureMessage, msgAndArgs...)
+}
+
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the wrapped TestingT.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
+// False asserts that the specified value is false.
+//
+// a.False(myBool)
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return False(a.t, value, msgAndArgs...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Falsef(a.t, value, msg, args...)
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExists(a.t, path, msgAndArgs...)
+}
+
+// FileExistsf checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func (a *Assertions) FileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return FileExistsf(a.t, path, msg, args...)
+}
+
+// Greater asserts that the first element is greater than the second
+//
+// a.Greater(2, 1)
+// a.Greater(float64(2), float64(1))
+// a.Greater("b", "a")
+func (a *Assertions) Greater(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greater(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqual asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqual(2, 1)
+// a.GreaterOrEqual(2, 2)
+// a.GreaterOrEqual("b", "a")
+// a.GreaterOrEqual("b", "b")
+func (a *Assertions) GreaterOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// GreaterOrEqualf asserts that the first element is greater than or equal to the second
+//
+// a.GreaterOrEqualf(2, 1, "error message %s", "formatted")
+// a.GreaterOrEqualf(2, 2, "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "a", "error message %s", "formatted")
+// a.GreaterOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) GreaterOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return GreaterOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Greaterf asserts that the first element is greater than the second
+//
+// a.Greaterf(2, 1, "error message %s", "formatted")
+// a.Greaterf(float64(2), float64(1), "error message %s", "formatted")
+// a.Greaterf("b", "a", "error message %s", "formatted")
+func (a *Assertions) Greaterf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Greaterf(a.t, e1, e2, msg, args...)
+}
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContains(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContains(a.t, handler, method, url, values, str, msgAndArgs...)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str, msg, args...)
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPError(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPErrorf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirect(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPRedirectf(a.t, handler, method, url, values, msg, args...)
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCode(myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCode(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCode(a.t, handler, method, url, values, statuscode, msgAndArgs...)
+}
+
+// HTTPStatusCodef asserts that a specified handler returns a specified status code.
+//
+// a.HTTPStatusCodef(myHandler, "GET", "/notImplemented", nil, 501, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPStatusCodef(handler http.HandlerFunc, method string, url string, values url.Values, statuscode int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPStatusCodef(a.t, handler, method, url, values, statuscode, msg, args...)
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccess(a.t, handler, method, url, values, msgAndArgs...)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return HTTPSuccessf(a.t, handler, method, url, values, msg, args...)
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// a.Implements((*MyInterface)(nil), new(MyObject))
+func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implements(a.t, interfaceObject, object, msgAndArgs...)
+}
+
+// Implementsf asserts that the specified object implements the given interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// a.InDelta(math.Pi, 22/7.0, 0.01)
+func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDelta(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
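+//
+// For example, with illustrative maps (|3.1416 - 3.14| <= 0.01):
+//
+// a.InDeltaMapValues(map[string]float64{"pi": 3.1416}, map[string]float64{"pi": 3.14}, 0.01)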
+func (a *Assertions) InDeltaMapValues(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValues(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaMapValuesf is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func (a *Assertions) InDeltaMapValuesf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaMapValuesf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, 22/7.0, 0.01, "error message %s", "formatted")
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
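+//
+// For example, 100 and 101 have a relative error of |100-101|/|100| = 0.01,
+// so the following passes:
+//
+// a.InEpsilon(100.0, 101.0, 0.02)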
+func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// a.IsDecreasing([]int{2, 1, 0})
+// a.IsDecreasing([]float64{2, 1})
+// a.IsDecreasing([]string{"b", "a"})
+func (a *Assertions) IsDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsDecreasingf asserts that the collection is decreasing
+//
+// a.IsDecreasingf([]int{2, 1, 0}, "error message %s", "formatted")
+// a.IsDecreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsDecreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsDecreasingf(a.t, object, msg, args...)
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// a.IsIncreasing([]int{1, 2, 3})
+// a.IsIncreasing([]float64{1, 2})
+// a.IsIncreasing([]string{"a", "b"})
+func (a *Assertions) IsIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsIncreasingf asserts that the collection is increasing
+//
+// a.IsIncreasingf([]int{1, 2, 3}, "error message %s", "formatted")
+// a.IsIncreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsIncreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsIncreasingf(a.t, object, msg, args...)
+}
+
+// IsNonDecreasing asserts that the collection is not decreasing
+//
+// a.IsNonDecreasing([]int{1, 1, 2})
+// a.IsNonDecreasing([]float64{1, 2})
+// a.IsNonDecreasing([]string{"a", "b"})
+func (a *Assertions) IsNonDecreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonDecreasingf asserts that the collection is not decreasing
+//
+// a.IsNonDecreasingf([]int{1, 1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]float64{1, 2}, "error message %s", "formatted")
+// a.IsNonDecreasingf([]string{"a", "b"}, "error message %s", "formatted")
+func (a *Assertions) IsNonDecreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonDecreasingf(a.t, object, msg, args...)
+}
+
+// IsNonIncreasing asserts that the collection is not increasing
+//
+// a.IsNonIncreasing([]int{2, 1, 1})
+// a.IsNonIncreasing([]float64{2, 1})
+// a.IsNonIncreasing([]string{"b", "a"})
+func (a *Assertions) IsNonIncreasing(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasing(a.t, object, msgAndArgs...)
+}
+
+// IsNonIncreasingf asserts that the collection is not increasing
+//
+// a.IsNonIncreasingf([]int{2, 1, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]float64{2, 1}, "error message %s", "formatted")
+// a.IsNonIncreasingf([]string{"b", "a"}, "error message %s", "formatted")
+func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsNonIncreasingf(a.t, object, msg, args...)
+}
+
+// IsType asserts that the specified objects are of the same type.
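+//
+// For example, with an illustrative standard-library type:
+//
+// a.IsType(&bytes.Buffer{}, obj)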
+func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsType(a.t, expectedType, object, msgAndArgs...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
+// Len asserts that the specified object has the specified length.
+// Len also fails if the object has a type that len() does not accept.
+//
+// a.Len(mySlice, 3)
+func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Len(a.t, object, length, msgAndArgs...)
+}
+
+// Lenf asserts that the specified object has the specified length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lenf(a.t, object, length, msg, args...)
+}
+
+// Less asserts that the first element is less than the second
+//
+// a.Less(1, 2)
+// a.Less(float64(1), float64(2))
+// a.Less("a", "b")
+func (a *Assertions) Less(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Less(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqual asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqual(1, 2)
+// a.LessOrEqual(2, 2)
+// a.LessOrEqual("a", "b")
+// a.LessOrEqual("b", "b")
+func (a *Assertions) LessOrEqual(e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqual(a.t, e1, e2, msgAndArgs...)
+}
+
+// LessOrEqualf asserts that the first element is less than or equal to the second
+//
+// a.LessOrEqualf(1, 2, "error message %s", "formatted")
+// a.LessOrEqualf(2, 2, "error message %s", "formatted")
+// a.LessOrEqualf("a", "b", "error message %s", "formatted")
+// a.LessOrEqualf("b", "b", "error message %s", "formatted")
+func (a *Assertions) LessOrEqualf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return LessOrEqualf(a.t, e1, e2, msg, args...)
+}
+
+// Lessf asserts that the first element is less than the second
+//
+// a.Lessf(1, 2, "error message %s", "formatted")
+// a.Lessf(float64(1), float64(2), "error message %s", "formatted")
+// a.Lessf("a", "b", "error message %s", "formatted")
+func (a *Assertions) Lessf(e1 interface{}, e2 interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Lessf(a.t, e1, e2, msg, args...)
+}
+
+// Negative asserts that the specified element is negative
+//
+// a.Negative(-1)
+// a.Negative(-1.23)
+func (a *Assertions) Negative(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negative(a.t, e, msgAndArgs...)
+}
+
+// Negativef asserts that the specified element is negative
+//
+// a.Negativef(-1, "error message %s", "formatted")
+// a.Negativef(-1.23, "error message %s", "formatted")
+func (a *Assertions) Negativef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Negativef(a.t, e, msg, args...)
+}
+
+// Never asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Never(func() bool { return false; }, time.Second, 10*time.Millisecond)
+func (a *Assertions) Never(condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Never(a.t, condition, waitFor, tick, msgAndArgs...)
+}
+
+// Neverf asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// a.Neverf(func() bool { return false; }, time.Second, 10*time.Millisecond, "error message %s", "formatted")
+func (a *Assertions) Neverf(condition func() bool, waitFor time.Duration, tick time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Neverf(a.t, condition, waitFor, tick, msg, args...)
+}
+
+// Nil asserts that the specified object is nil.
+//
+// a.Nil(err)
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nil(a.t, object, msgAndArgs...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Nilf(a.t, object, msg, args...)
+}
+
+// NoDirExists checks whether a directory does not exist at the given path.
+// It fails only if the path points to an existing directory.
+func (a *Assertions) NoDirExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExists(a.t, path, msgAndArgs...)
+}
+
+// NoDirExistsf checks whether a directory does not exist at the given path.
+// It fails only if the path points to an existing directory.
+func (a *Assertions) NoDirExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoDirExistsf(a.t, path, msg, args...)
+}
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoError(err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoError(a.t, err, msgAndArgs...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoErrorf(a.t, err, msg, args...)
+}
+
+// NoFileExists checks whether a file does not exist at a given path.
+// It fails only if the path points to an existing file.
+func (a *Assertions) NoFileExists(path string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExists(a.t, path, msgAndArgs...)
+}
+
+// NoFileExistsf checks whether a file does not exist at a given path.
+// It fails only if the path points to an existing file.
+func (a *Assertions) NoFileExistsf(path string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NoFileExistsf(a.t, path, msg, args...)
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
+func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContains(a.t, s, contains, msgAndArgs...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
+// NotEmpty asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if a.NotEmpty(obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmpty(a.t, object, msgAndArgs...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty, i.e. not nil, "", false, 0, or
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// a.NotEqual(obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqual(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValues(obj1, obj2)
+func (a *Assertions) NotEqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValues(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotEqualValuesf asserts that two objects are not equal even when converted to the same type
+//
+// a.NotEqualValuesf(obj1, obj2, "error message %s", "formatted")
+func (a *Assertions) NotEqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
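+//
+// For example, with a standard-library sentinel error:
+//
+// a.NotErrorIs(err, io.EOF)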
+func (a *Assertions) NotErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIs(a.t, err, target, msgAndArgs...)
+}
+
+// NotErrorIsf asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotErrorIsf(a.t, err, target, msg, args...)
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// a.NotNil(err)
+func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNil(a.t, object, msgAndArgs...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotNilf(a.t, object, msg, args...)
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanics(func(){ RemainCalm() })
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanics(a.t, f, msgAndArgs...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
+// a.NotRegexp("^start", "it's not starting")
+func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexp(a.t, rx, str, msgAndArgs...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// a.NotSame(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSame(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSame(a.t, expected, actual, msgAndArgs...)
+}
+
+// NotSamef asserts that two pointers do not reference the same object.
+//
+// a.NotSamef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSamef(a.t, expected, actual, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain
+// all elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain
+// all elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZero(a.t, i, msgAndArgs...)
+}
+
+// NotZerof asserts that i is not the zero value for its type.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return NotZerof(a.t, i, msg, args...)
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panics(func(){ GoCrazy() })
+func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panics(a.t, f, msgAndArgs...)
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithError("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithError(errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithError(a.t, errString, f, msgAndArgs...)
+}
+
+// PanicsWithErrorf asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// a.PanicsWithErrorf("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithErrorf(errString string, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithErrorf(a.t, errString, f, msg, args...)
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Panicsf(a.t, f, msg, args...)
+}
+
+// Positive asserts that the specified element is positive
+//
+// a.Positive(1)
+// a.Positive(1.23)
+func (a *Assertions) Positive(e interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positive(a.t, e, msgAndArgs...)
+}
+
+// Positivef asserts that the specified element is positive
+//
+// a.Positivef(1, "error message %s", "formatted")
+// a.Positivef(1.23, "error message %s", "formatted")
+func (a *Assertions) Positivef(e interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Positivef(a.t, e, msg, args...)
+}
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// a.Regexp(regexp.MustCompile("start"), "it's starting")
+// a.Regexp("start...$", "it's not starting")
+func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexp(a.t, rx, str, msgAndArgs...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// a.Same(ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Same(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Same(a.t, expected, actual, msgAndArgs...)
+}
+
+// Samef asserts that two pointers reference the same object.
+//
+// a.Samef(ptr1, ptr2, "error message %s", "formatted")
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Samef(a.t, expected, actual, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
+// True asserts that the specified value is true.
+//
+// a.True(myBool)
+func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return True(a.t, value, msgAndArgs...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Truef(a.t, value, msg, args...)
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
+func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEq(expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEq(a.t, expected, actual, msgAndArgs...)
+}
+
+// YAMLEqf asserts that two YAML strings are equivalent.
+func (a *Assertions) YAMLEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return YAMLEqf(a.t, expected, actual, msg, args...)
+}
+
+// Zero asserts that i is the zero value for its type.
+func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zero(a.t, i, msgAndArgs...)
+}
+
+// Zerof asserts that i is the zero value for its type.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ if h, ok := a.t.(tHelper); ok {
+ h.Helper()
+ }
+ return Zerof(a.t, i, msg, args...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
new file mode 100644
index 0000000..188bb9e
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl
@@ -0,0 +1,5 @@
+{{.CommentWithoutT "a"}}
+func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {
+ if h, ok := a.t.(tHelper); ok { h.Helper() }
+ return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_order.go b/vendor/github.com/stretchr/testify/assert/assertion_order.go
new file mode 100644
index 0000000..1c3b471
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_order.go
@@ -0,0 +1,81 @@
+package assert
+
+import (
+ "fmt"
+ "reflect"
+)
+
+// isOrdered checks that the elements of the collection are ordered according to allowedComparesResults.
+func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
+ objKind := reflect.TypeOf(object).Kind()
+ if objKind != reflect.Slice && objKind != reflect.Array {
+ return false
+ }
+
+ objValue := reflect.ValueOf(object)
+ objLen := objValue.Len()
+
+ if objLen <= 1 {
+ return true
+ }
+
+ value := objValue.Index(0)
+ valueInterface := value.Interface()
+ firstValueKind := value.Kind()
+
+ for i := 1; i < objLen; i++ {
+ prevValue := value
+ prevValueInterface := valueInterface
+
+ value = objValue.Index(i)
+ valueInterface = value.Interface()
+
+ compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
+
+ if !isComparable {
+ return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...)
+ }
+
+ if !containsValue(allowedComparesResults, compareResult) {
+ return Fail(t, fmt.Sprintf(failMessage, prevValue, value), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// IsIncreasing asserts that the collection is increasing
+//
+// assert.IsIncreasing(t, []int{1, 2, 3})
+// assert.IsIncreasing(t, []float64{1, 2})
+// assert.IsIncreasing(t, []string{"a", "b"})
+func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
+}
+
+// IsNonIncreasing asserts that the collection is non-increasing
+//
+// assert.IsNonIncreasing(t, []int{2, 1, 1})
+// assert.IsNonIncreasing(t, []float64{2, 1})
+// assert.IsNonIncreasing(t, []string{"b", "a"})
+func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
+}
+
+// IsDecreasing asserts that the collection is decreasing
+//
+// assert.IsDecreasing(t, []int{2, 1, 0})
+// assert.IsDecreasing(t, []float64{2, 1})
+// assert.IsDecreasing(t, []string{"b", "a"})
+func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
+}
+
+// IsNonDecreasing asserts that the collection is non-decreasing
+//
+// assert.IsNonDecreasing(t, []int{1, 1, 2})
+// assert.IsNonDecreasing(t, []float64{1, 2})
+// assert.IsNonDecreasing(t, []string{"a", "b"})
+func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
+}
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
new file mode 100644
index 0000000..bcac440
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -0,0 +1,1774 @@
+package assert
+
+import (
+ "bufio"
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "os"
+ "reflect"
+ "regexp"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/pmezard/go-difflib/difflib"
+ yaml "gopkg.in/yaml.v3"
+)
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
+
+// TestingT is an interface wrapper around *testing.T
+type TestingT interface {
+ Errorf(format string, args ...interface{})
+}
+
+// ComparisonAssertionFunc is a common function prototype when comparing two values. Can be useful
+// for table driven tests.
+type ComparisonAssertionFunc func(TestingT, interface{}, interface{}, ...interface{}) bool
+
+// ValueAssertionFunc is a common function prototype when validating a single value. Can be useful
+// for table driven tests.
+type ValueAssertionFunc func(TestingT, interface{}, ...interface{}) bool
+
+// BoolAssertionFunc is a common function prototype when validating a bool value. Can be useful
+// for table driven tests.
+type BoolAssertionFunc func(TestingT, bool, ...interface{}) bool
+
+// ErrorAssertionFunc is a common function prototype when validating an error value. Can be useful
+// for table driven tests.
+type ErrorAssertionFunc func(TestingT, error, ...interface{}) bool
+
+// Comparison is a custom function that returns true on success and false on failure
+type Comparison func() (success bool)
+
+/*
+ Helper functions
+*/
+
+// ObjectsAreEqual determines if two objects are considered equal.
+//
+// This function does no assertion of any kind.
+func ObjectsAreEqual(expected, actual interface{}) bool {
+ if expected == nil || actual == nil {
+ return expected == actual
+ }
+
+ exp, ok := expected.([]byte)
+ if !ok {
+ return reflect.DeepEqual(expected, actual)
+ }
+
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ }
+ if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+}
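+
+// For illustration (hypothetical values): the []byte fast path above compares
+// byte slices by content via bytes.Equal, while everything else falls back to
+// reflect.DeepEqual, which never equates distinct types.
+//
+// ObjectsAreEqual([]byte{1, 2}, []byte{1, 2}) // true, via bytes.Equal
+// ObjectsAreEqual(int32(1), int64(1)) // false, types differ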
+
+// ObjectsAreEqualValues gets whether two objects are equal, or if their
+// values are equal.
+func ObjectsAreEqualValues(expected, actual interface{}) bool {
+ if ObjectsAreEqual(expected, actual) {
+ return true
+ }
+
+ actualType := reflect.TypeOf(actual)
+ if actualType == nil {
+ return false
+ }
+ expectedValue := reflect.ValueOf(expected)
+ if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {
+ // Attempt comparison after type conversion
+ return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)
+ }
+
+ return false
+}
+
+/* CallerInfo is necessary because the assert functions use the testing object
+internally, causing it to print the file:line of the assert method, rather than where
+the problem actually occurred in calling code.*/
+
+// CallerInfo returns an array of strings containing the file and line number
+// of each stack frame leading from the current test to the assert call that
+// failed.
+func CallerInfo() []string {
+
+ var pc uintptr
+ var ok bool
+ var file string
+ var line int
+ var name string
+
+ callers := []string{}
+ for i := 0; ; i++ {
+ pc, file, line, ok = runtime.Caller(i)
+ if !ok {
+ // The breaks below failed to terminate the loop, and we ran off the
+ // end of the call stack.
+ break
+ }
+
+ // A rare edge case, but continuing past an "<autogenerated>" frame would panic; see issue #180
+ if file == "<autogenerated>" {
+ break
+ }
+
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ break
+ }
+ name = f.Name()
+
+ // testing.tRunner is the standard library function that calls
+ // tests. Subtests are called directly by tRunner, without going through
+ // the Test/Benchmark/Example function that contains the t.Run calls, so
+ // with subtests we should break when we hit tRunner, without adding it
+ // to the list of callers.
+ if name == "testing.tRunner" {
+ break
+ }
+
+ parts := strings.Split(file, "/")
+ file = parts[len(parts)-1]
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
+ }
+
+ // Drop the package
+ segments := strings.Split(name, ".")
+ name = segments[len(segments)-1]
+ if isTest(name, "Test") ||
+ isTest(name, "Benchmark") ||
+ isTest(name, "Example") {
+ break
+ }
+ }
+
+ return callers
+}
+
+// Stolen from the `go test` tool.
+// isTest tells whether name looks like a test (or benchmark, according to prefix).
+// It is a Test (say) if there is a character after Test that is not a lower-case letter.
+// We don't want TesticularCancer.
+func isTest(name, prefix string) bool {
+ if !strings.HasPrefix(name, prefix) {
+ return false
+ }
+ if len(name) == len(prefix) { // "Test" is ok
+ return true
+ }
+ r, _ := utf8.DecodeRuneInString(name[len(prefix):])
+ return !unicode.IsLower(r)
+}
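+
+// For illustration:
+//
+// isTest("TestFoo", "Test") // true
+// isTest("Test", "Test") // true, name == prefix
+// isTest("Testify", "Test") // false, 'i' after "Test" is lower-case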
+
+func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
+ if len(msgAndArgs) == 0 {
+ return ""
+ }
+ if len(msgAndArgs) == 1 {
+ msg := msgAndArgs[0]
+ if msgAsStr, ok := msg.(string); ok {
+ return msgAsStr
+ }
+ return fmt.Sprintf("%+v", msg)
+ }
+ if len(msgAndArgs) > 1 {
+ return fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)
+ }
+ return ""
+}
+
+// Aligns the provided message so that all lines after the first line start at the same location as the first line.
+// Assumes that the first line starts at the correct location (after the leading tab, label, spacer and tab).
+// The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
+// basis on which the alignment occurs).
+func indentMessageLines(message string, longestLabelLen int) string {
+ outBuf := new(bytes.Buffer)
+
+ for i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {
+ // no need to align first line because it starts at the correct location (after the label)
+ if i != 0 {
+ // append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
+ outBuf.WriteString("\n\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
+ }
+ outBuf.WriteString(scanner.Text())
+ }
+
+ return outBuf.String()
+}
+
+type failNower interface {
+ FailNow()
+}
+
+// FailNow fails test
+func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, failureMessage, msgAndArgs...)
+
+ // We cannot extend TestingT with FailNow() and
+ // maintain backwards compatibility, so we fallback
+ // to panicking when FailNow is not available in
+ // TestingT.
+ // See issue #263
+
+ if t, ok := t.(failNower); ok {
+ t.FailNow()
+ } else {
+ panic("test failed and t is missing `FailNow()`")
+ }
+ return false
+}
+
+// Fail reports a failure through
+func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ content := []labeledContent{
+ {"Error Trace", strings.Join(CallerInfo(), "\n\t\t\t")},
+ {"Error", failureMessage},
+ }
+
+ // Add test name if the Go version supports it
+ if n, ok := t.(interface {
+ Name() string
+ }); ok {
+ content = append(content, labeledContent{"Test", n.Name()})
+ }
+
+ message := messageFromMsgAndArgs(msgAndArgs...)
+ if len(message) > 0 {
+ content = append(content, labeledContent{"Messages", message})
+ }
+
+ t.Errorf("\n%s", ""+labeledOutput(content...))
+
+ return false
+}
+
+type labeledContent struct {
+ label string
+ content string
+}
+
+// labeledOutput returns a string consisting of the provided labeledContent. Each labeled output is appended in the following manner:
+//
+// \t{{label}}:{{align_spaces}}\t{{content}}\n
+//
+// The leading tab indents each line past the prefix added by testing.T.Errorf. The "\t{{label}}:" is for the label.
+// If a label is shorter than the longest label provided, padding spaces are added to make all the labels match in length. Once this
+// alignment is achieved, "\t{{content}}\n" is added for the output.
+//
+// If the content of the labeledOutput contains line breaks, the subsequent lines are aligned so that they start at the same location as the first line.
+func labeledOutput(content ...labeledContent) string {
+ longestLabel := 0
+ for _, v := range content {
+ if len(v.label) > longestLabel {
+ longestLabel = len(v.label)
+ }
+ }
+ var output string
+ for _, v := range content {
+ output += "\t" + v.label + ":" + strings.Repeat(" ", longestLabel-len(v.label)) + "\t" + indentMessageLines(v.content, longestLabel) + "\n"
+ }
+ return output
+}
+
+// Implements asserts that the specified object implements the given interface.
+//
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
+func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ interfaceType := reflect.TypeOf(interfaceObject).Elem()
+
+ if object == nil {
+ return Fail(t, fmt.Sprintf("Cannot check if nil implements %v", interfaceType), msgAndArgs...)
+ }
+ if !reflect.TypeOf(object).Implements(interfaceType) {
+ return Fail(t, fmt.Sprintf("%T must implement %v", object, interfaceType), msgAndArgs...)
+ }
+
+ return true
+}
+
+// IsType asserts that the specified objects are of the same type.
+func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {
+ return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Equal asserts that two objects are equal.
+//
+// assert.Equal(t, 123, 123)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if !ObjectsAreEqual(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if expected == nil && actual == nil {
+ return nil
+ }
+
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+// Same asserts that two pointers reference the same object.
+//
+// assert.Same(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf("Not same: \n"+
+ "expected: %p %#v\n"+
+ "actual : %p %#v", expected, expected, actual, actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotSame asserts that two pointers do not reference the same object.
+//
+// assert.NotSame(t, ptr1, ptr2)
+//
+// Both arguments must be pointer variables. Pointer variable sameness is
+// determined based on the equality of both type and value.
+func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if samePointers(expected, actual) {
+ return Fail(t, fmt.Sprintf(
+ "Expected and actual point to the same object: %p %#v",
+ expected, expected), msgAndArgs...)
+ }
+ return true
+}
+
+// samePointers compares two generic interface objects and returns whether
+// they point to the same object
+func samePointers(first, second interface{}) bool {
+ firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
+ if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
+ return false
+ }
+
+ firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
+ if firstType != secondType {
+ return false
+ }
+
+ // compare pointer addresses
+ return first == second
+}
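+
+// For illustration (hypothetical variables): two values are "same" only when
+// they are pointers of identical type holding the same address, not merely
+// pointers to equal values.
+//
+// x, y := 5, 5
+// samePointers(&x, &x) // true
+// samePointers(&x, &y) // false, different addresses
+// samePointers(x, y) // false, not pointers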
+
+// formatUnequalValues takes two values of arbitrary types and returns string
+// representations appropriate to be presented to the user.
+//
+// If the values are not of like type, the returned strings will be prefixed
+// with the type name, and the value will be enclosed in parentheses similar
+// to a type conversion in the Go grammar.
+func formatUnequalValues(expected, actual interface{}) (e string, a string) {
+ if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
+ return fmt.Sprintf("%T(%s)", expected, truncatingFormat(expected)),
+ fmt.Sprintf("%T(%s)", actual, truncatingFormat(actual))
+ }
+ switch expected.(type) {
+ case time.Duration:
+ return fmt.Sprintf("%v", expected), fmt.Sprintf("%v", actual)
+ }
+ return truncatingFormat(expected), truncatingFormat(actual)
+}
+
+// truncatingFormat formats the data and truncates it if it's too long.
+//
+// This helps keep formatted error message lines from exceeding the
+// bufio.MaxScanTokenSize max line length that the go testing framework imposes.
+func truncatingFormat(data interface{}) string {
+ value := fmt.Sprintf("%#v", data)
+ max := bufio.MaxScanTokenSize - 100 // Leave some space for the type info too if needed.
+ if len(value) > max {
+ value = value[0:max] + "<... truncated>"
+ }
+ return value
+}
+
+// EqualValues asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValues(t, uint32(123), int32(123))
+func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if !ObjectsAreEqualValues(expected, actual) {
+ diff := diff(expected, actual)
+ expected, actual = formatUnequalValues(expected, actual)
+ return Fail(t, fmt.Sprintf("Not equal: \n"+
+ "expected: %s\n"+
+ "actual : %s%s", expected, actual, diff), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Exactly asserts that two objects are equal in value and type.
+//
+// assert.Exactly(t, int32(123), int64(123))
+func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ aType := reflect.TypeOf(expected)
+ bType := reflect.TypeOf(actual)
+
+ if aType != bType {
+ return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
+ }
+
+ return Equal(t, expected, actual, msgAndArgs...)
+
+}
+
+// NotNil asserts that the specified object is not nil.
+//
+// assert.NotNil(t, err)
+func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if !isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Expected value not to be nil.", msgAndArgs...)
+}
+
+// containsKind checks whether a specified kind is in the slice of kinds.
+func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
+ for i := 0; i < len(kinds); i++ {
+ if kind == kinds[i] {
+ return true
+ }
+ }
+
+ return false
+}
+
+// isNil checks if a specified object is nil or not, without Failing.
+func isNil(object interface{}) bool {
+ if object == nil {
+ return true
+ }
+
+ value := reflect.ValueOf(object)
+ kind := value.Kind()
+ isNilableKind := containsKind(
+ []reflect.Kind{
+ reflect.Chan, reflect.Func,
+ reflect.Interface, reflect.Map,
+ reflect.Ptr, reflect.Slice},
+ kind)
+
+ if isNilableKind && value.IsNil() {
+ return true
+ }
+
+ return false
+}
+
+// Nil asserts that the specified object is nil.
+//
+// assert.Nil(t, err)
+func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ if isNil(object) {
+ return true
+ }
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Expected nil, but got: %#v", object), msgAndArgs...)
+}
+
+// isEmpty gets whether the specified object is considered empty or not.
+func isEmpty(object interface{}) bool {
+
+ // get nil case out of the way
+ if object == nil {
+ return true
+ }
+
+ objValue := reflect.ValueOf(object)
+
+ switch objValue.Kind() {
+ // collection types are empty when they have no element
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice:
+ return objValue.Len() == 0
+ // pointers are empty if nil or if the value they point to is empty
+ case reflect.Ptr:
+ if objValue.IsNil() {
+ return true
+ }
+ deref := objValue.Elem().Interface()
+ return isEmpty(deref)
+ // for all other types, compare against the zero value
+ default:
+ zero := reflect.Zero(objValue.Type())
+ return reflect.DeepEqual(object, zero.Interface())
+ }
+}
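+
+// For illustration, values the rules above treat as empty:
+//
+// isEmpty(nil) // true
+// isEmpty("") // true, zero value for string
+// isEmpty([]int{}) // true, len == 0
+// isEmpty([]int{0}) // false, has an element
+// isEmpty(0) // true, zero value for int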
+
+// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Empty(t, obj)
+func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmpty(t, obj) {
+// assert.Equal(t, "two", obj[1])
+// }
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
+ pass := !isEmpty(object)
+ if !pass {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ Fail(t, fmt.Sprintf("Should NOT be empty, but was %v", object), msgAndArgs...)
+ }
+
+ return pass
+
+}
+
+// getLen tries to get the length of an object.
+// It returns (false, 0) if that is impossible.
+func getLen(x interface{}) (ok bool, length int) {
+ v := reflect.ValueOf(x)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+ return true, v.Len()
+}
+
+// Len asserts that the specified object has a specific length.
+// Len also fails if the object has a type that the builtin len() does not accept.
+//
+// assert.Len(t, mySlice, 3)
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ ok, l := getLen(object)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...)
+ }
+
+ if l != length {
+ return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
+ }
+ return true
+}
+
+// True asserts that the specified value is true.
+//
+// assert.True(t, myBool)
+func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if !value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be true", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// False asserts that the specified value is false.
+//
+// assert.False(t, myBool)
+func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
+ if value {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "Should be false", msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqual asserts that the specified values are NOT equal.
+//
+// assert.NotEqual(t, obj1, obj2)
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
+
+ if ObjectsAreEqual(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotEqualValues asserts that two objects are not equal even when converted to the same type
+//
+// assert.NotEqualValues(t, obj1, obj2)
+func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if ObjectsAreEqualValues(expected, actual) {
+ return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
+ }
+
+ return true
+}
+
+// includeElement loops over the list and checks whether it includes the element.
+// It returns (false, false) if looping is impossible,
+// (true, false) if the element was not found, and
+// (true, true) if the element was found.
+func includeElement(list interface{}, element interface{}) (ok, found bool) {
+
+ listValue := reflect.ValueOf(list)
+ listKind := reflect.TypeOf(list).Kind()
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ found = false
+ }
+ }()
+
+ if listKind == reflect.String {
+ elementValue := reflect.ValueOf(element)
+ return true, strings.Contains(listValue.String(), elementValue.String())
+ }
+
+ if listKind == reflect.Map {
+ mapKeys := listValue.MapKeys()
+ for i := 0; i < len(mapKeys); i++ {
+ if ObjectsAreEqual(mapKeys[i].Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+ }
+
+ for i := 0; i < listValue.Len(); i++ {
+ if ObjectsAreEqual(listValue.Index(i).Interface(), element) {
+ return true, true
+ }
+ }
+ return true, false
+
+}
+
+// Contains asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
+func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("%#v does not contain %#v", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
+func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ok, found := includeElement(s, contains)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
+ }
+ if found {
+ return Fail(t, fmt.Sprintf("\"%s\" should not contain \"%s\"", s, contains), msgAndArgs...)
+ }
+
+ return true
+
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does not contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if subset == nil {
+ return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
+// ElementsMatch asserts that the specified listA(array, slice...) is equal to specified
+// listB(array, slice...) ignoring the order of the elements. If there are duplicate elements,
+// the number of appearances of each of them in both lists should match.
+//
+// assert.ElementsMatch(t, [1, 3, 2, 3], [1, 3, 3, 2])
+func ElementsMatch(t TestingT, listA, listB interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if isEmpty(listA) && isEmpty(listB) {
+ return true
+ }
+
+ if !isList(t, listA, msgAndArgs...) || !isList(t, listB, msgAndArgs...) {
+ return false
+ }
+
+ extraA, extraB := diffLists(listA, listB)
+
+ if len(extraA) == 0 && len(extraB) == 0 {
+ return true
+ }
+
+ return Fail(t, formatListDiff(listA, listB, extraA, extraB), msgAndArgs...)
+}
+
+// isList checks that the provided value is array or slice.
+func isList(t TestingT, list interface{}, msgAndArgs ...interface{}) (ok bool) {
+ kind := reflect.TypeOf(list).Kind()
+ if kind != reflect.Array && kind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s, expecting array or slice", list, kind),
+ msgAndArgs...)
+ }
+ return true
+}
+
+// diffLists diffs two arrays/slices and returns slices of elements that are only in A and only in B.
+// If some element is present multiple times, each instance is counted separately (e.g. if something is 2x in A and
+// 5x in B, it will be 0x in extraA and 3x in extraB). The order of items in both lists is ignored.
+func diffLists(listA, listB interface{}) (extraA, extraB []interface{}) {
+ aValue := reflect.ValueOf(listA)
+ bValue := reflect.ValueOf(listB)
+
+ aLen := aValue.Len()
+ bLen := bValue.Len()
+
+ // Mark indexes in bValue that we already used
+ visited := make([]bool, bLen)
+ for i := 0; i < aLen; i++ {
+ element := aValue.Index(i).Interface()
+ found := false
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ if ObjectsAreEqual(bValue.Index(j).Interface(), element) {
+ visited[j] = true
+ found = true
+ break
+ }
+ }
+ if !found {
+ extraA = append(extraA, element)
+ }
+ }
+
+ for j := 0; j < bLen; j++ {
+ if visited[j] {
+ continue
+ }
+ extraB = append(extraB, bValue.Index(j).Interface())
+ }
+
+ return
+}
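+
+// For illustration of the per-instance counting above (hypothetical input):
+//
+// diffLists([]int{1, 2, 2}, []int{2, 2, 2, 3})
+// // extraA == [1], extraB == [2, 3]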
+
+func formatListDiff(listA, listB interface{}, extraA, extraB []interface{}) string {
+ var msg bytes.Buffer
+
+ msg.WriteString("elements differ")
+ if len(extraA) > 0 {
+ msg.WriteString("\n\nextra elements in list A:\n")
+ msg.WriteString(spewConfig.Sdump(extraA))
+ }
+ if len(extraB) > 0 {
+ msg.WriteString("\n\nextra elements in list B:\n")
+ msg.WriteString(spewConfig.Sdump(extraB))
+ }
+ msg.WriteString("\n\nlistA:\n")
+ msg.WriteString(spewConfig.Sdump(listA))
+ msg.WriteString("\n\nlistB:\n")
+ msg.WriteString(spewConfig.Sdump(listB))
+
+ return msg.String()
+}
+
+// Condition uses a Comparison to assert a complex condition.
+func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ result := comp()
+ if !result {
+ Fail(t, "Condition failed!", msgAndArgs...)
+ }
+ return result
+}
+
+// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics
+// methods, and represents a simple func that takes no arguments, and returns nothing.
+type PanicTestFunc func()
+
+// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
+func didPanic(f PanicTestFunc) (bool, interface{}, string) {
+
+ didPanic := false
+ var message interface{}
+ var stack string
+ func() {
+
+ defer func() {
+ if message = recover(); message != nil {
+ didPanic = true
+ stack = string(debug.Stack())
+ }
+ }()
+
+ // call the target function
+ f()
+
+ }()
+
+ return didPanic, message, stack
+
+}
+
+// Panics asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panics(t, func(){ GoCrazy() })
+func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, _ := didPanic(f); !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, expected, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// PanicsWithError asserts that the code inside the specified PanicTestFunc
+// panics, and that the recovered panic value is an error that satisfies the
+// EqualError comparison.
+//
+// assert.PanicsWithError(t, "crazy error", func(){ GoCrazy() })
+func PanicsWithError(t TestingT, errString string, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ funcDidPanic, panicValue, panickedStack := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\tPanic value:\t%#v", f, panicValue), msgAndArgs...)
+ }
+ panicErr, ok := panicValue.(error)
+ if !ok || panicErr.Error() != errString {
+ return Fail(t, fmt.Sprintf("func %#v should panic with error message:\t%#v\n\tPanic value:\t%#v\n\tPanic stack:\t%s", f, errString, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanics(t, func(){ RemainCalm() })
+func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ if funcDidPanic, panicValue, panickedStack := didPanic(f); funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should not panic\n\tPanic value:\t%v\n\tPanic stack:\t%s", f, panicValue, panickedStack), msgAndArgs...)
+ }
+
+ return true
+}
+
+// WithinDuration asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
+func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ dt := expected.Sub(actual)
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+func toFloat(x interface{}) (float64, bool) {
+ var xf float64
+ xok := true
+
+ switch xn := x.(type) {
+ case uint:
+ xf = float64(xn)
+ case uint8:
+ xf = float64(xn)
+ case uint16:
+ xf = float64(xn)
+ case uint32:
+ xf = float64(xn)
+ case uint64:
+ xf = float64(xn)
+ case int:
+ xf = float64(xn)
+ case int8:
+ xf = float64(xn)
+ case int16:
+ xf = float64(xn)
+ case int32:
+ xf = float64(xn)
+ case int64:
+ xf = float64(xn)
+ case float32:
+ xf = float64(xn)
+ case float64:
+ xf = xn
+ case time.Duration:
+ xf = float64(xn)
+ default:
+ xok = false
+ }
+
+ return xf, xok
+}
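+
+// For illustration, toFloat widens every numeric kind it knows about
+// (including time.Duration, which is an int64) to float64:
+//
+// toFloat(int8(3)) // 3.0, true
+// toFloat(time.Second) // 1e9, true (nanoseconds)
+// toFloat("3") // 0, false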
+
+// InDelta asserts that the two numerals are within delta of each other.
+//
+// assert.InDelta(t, math.Pi, 22/7.0, 0.01)
+func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ af, aok := toFloat(expected)
+ bf, bok := toFloat(actual)
+
+ if !aok || !bok {
+ return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
+ }
+
+ if math.IsNaN(af) {
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
+ }
+
+ if math.IsNaN(bf) {
+ return Fail(t, fmt.Sprintf("Expected %v with delta %v, but was NaN", expected, delta), msgAndArgs...)
+ }
+
+ dt := af - bf
+ if dt < -delta || dt > delta {
+ return Fail(t, fmt.Sprintf("Max difference between %v and %v allowed is %v, but difference was %v", expected, actual, delta, dt), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InDeltaSlice is the same as InDelta, except it compares two slices.
+func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+// InDeltaMapValues is the same as InDelta, but it compares all values between two maps. Both maps must have exactly the same keys.
+func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Map ||
+ reflect.TypeOf(expected).Kind() != reflect.Map {
+ return Fail(t, "Arguments must be maps", msgAndArgs...)
+ }
+
+ expectedMap := reflect.ValueOf(expected)
+ actualMap := reflect.ValueOf(actual)
+
+ if expectedMap.Len() != actualMap.Len() {
+ return Fail(t, "Arguments must have the same number of keys", msgAndArgs...)
+ }
+
+ for _, k := range expectedMap.MapKeys() {
+ ev := expectedMap.MapIndex(k)
+ av := actualMap.MapIndex(k)
+
+ if !ev.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in expected map", k), msgAndArgs...)
+ }
+
+ if !av.IsValid() {
+ return Fail(t, fmt.Sprintf("missing key %q in actual map", k), msgAndArgs...)
+ }
+
+ if !InDelta(
+ t,
+ ev.Interface(),
+ av.Interface(),
+ delta,
+ msgAndArgs...,
+ ) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func calcRelativeError(expected, actual interface{}) (float64, error) {
+ af, aok := toFloat(expected)
+ if !aok {
+ return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
+ }
+ if math.IsNaN(af) {
+ return 0, errors.New("expected value must not be NaN")
+ }
+ if af == 0 {
+ return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
+ }
+ bf, bok := toFloat(actual)
+ if !bok {
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
+ }
+ if math.IsNaN(bf) {
+ return 0, errors.New("actual value must not be NaN")
+ }
+
+ return math.Abs(af-bf) / math.Abs(af), nil
+}
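+
+// For illustration, the value computed above is |expected - actual| / |expected|:
+//
+// calcRelativeError(100.0, 101.0) // 0.01, nil
+// calcRelativeError(0, 1) // error: expected must be non-zero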
+
+// InEpsilon asserts that expected and actual have a relative error less than epsilon
+func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if math.IsNaN(epsilon) {
+ return Fail(t, "epsilon must not be NaN")
+ }
+ actualEpsilon, err := calcRelativeError(expected, actual)
+ if err != nil {
+ return Fail(t, err.Error(), msgAndArgs...)
+ }
+ if actualEpsilon > epsilon {
+ return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
+ }
+
+ return true
+}
+
+// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if expected == nil || actual == nil ||
+ reflect.TypeOf(actual).Kind() != reflect.Slice ||
+ reflect.TypeOf(expected).Kind() != reflect.Slice {
+ return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
+ }
+
+ actualSlice := reflect.ValueOf(actual)
+ expectedSlice := reflect.ValueOf(expected)
+
+ for i := 0; i < actualSlice.Len(); i++ {
+ result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon, msgAndArgs...)
+ if !result {
+ return result
+ }
+ }
+
+ return true
+}
+
+/*
+ Errors
+*/
+
+// NoError asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoError(t, err) {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err != nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, fmt.Sprintf("Received unexpected error:\n%+v", err), msgAndArgs...)
+ }
+
+ return true
+}
+
+// Error asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
+// }
+func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
+ if err == nil {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ return Fail(t, "An error is expected but got nil.", msgAndArgs...)
+ }
+
+ return true
+}
+
+// EqualError asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualError(t, err, expectedErrorString)
+func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !Error(t, theError, msgAndArgs...) {
+ return false
+ }
+ expected := errString
+ actual := theError.Error()
+ // don't need to use deep equals here, we know they are both strings
+ if expected != actual {
+ return Fail(t, fmt.Sprintf("Error message not equal:\n"+
+ "expected: %q\n"+
+ "actual : %q", expected, actual), msgAndArgs...)
+ }
+ return true
+}
+
+// matchRegexp returns true if a specified regexp matches a string.
+func matchRegexp(rx interface{}, str interface{}) bool {
+
+ var r *regexp.Regexp
+ if rr, ok := rx.(*regexp.Regexp); ok {
+ r = rr
+ } else {
+ r = regexp.MustCompile(fmt.Sprint(rx))
+ }
+
+ return (r.FindStringIndex(fmt.Sprint(str)) != nil)
+
+}
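+
+// For illustration, rx may be a compiled *regexp.Regexp or any value whose
+// fmt.Sprint form compiles as a pattern:
+//
+// matchRegexp("^it", "it's starting") // true
+// matchRegexp(regexp.MustCompile("start$"), "starting") // false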
+
+// Regexp asserts that a specified regexp matches a string.
+//
+// assert.Regexp(t, regexp.MustCompile("start"), "it's starting")
+// assert.Regexp(t, "start...$", "it's not starting")
+func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ match := matchRegexp(rx, str)
+
+ if !match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return match
+}
+
+// NotRegexp asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting")
+// assert.NotRegexp(t, "^start", "it's not starting")
+func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ match := matchRegexp(rx, str)
+
+ if match {
+ Fail(t, fmt.Sprintf("Expect \"%v\" to NOT match \"%v\"", str, rx), msgAndArgs...)
+ }
+
+ return !match
+
+}
+
+// Zero asserts that i is the zero value for its type.
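+//
+// For illustration:
+//
+// assert.Zero(t, 0) // passes
+// assert.Zero(t, "") // passes
+// assert.Zero(t, 1) // fails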
+func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// NotZero asserts that i is not the zero value for its type.
+func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {
+ return Fail(t, fmt.Sprintf("Should not be zero, but was %v", i), msgAndArgs...)
+ }
+ return true
+}
+
+// FileExists checks whether a file exists in the given path. It also fails if
+// the path points to a directory or there is an error when trying to check the file.
+func FileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a directory", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoFileExists checks whether a file does not exist in a given path. It fails
+// if the path points to an existing _file_ only.
+func NoFileExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ return true
+ }
+ if info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("file %q exists", path), msgAndArgs...)
+}
+
+// DirExists checks whether a directory exists in the given path. It also fails
+// if the path is a file rather than a directory, or if there is an error checking whether it exists.
+func DirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return Fail(t, fmt.Sprintf("unable to find file %q", path), msgAndArgs...)
+ }
+ return Fail(t, fmt.Sprintf("error when running os.Lstat(%q): %s", path, err), msgAndArgs...)
+ }
+ if !info.IsDir() {
+ return Fail(t, fmt.Sprintf("%q is a file", path), msgAndArgs...)
+ }
+ return true
+}
+
+// NoDirExists checks whether a directory does not exist in the given path.
+// It fails if the path points to an existing _directory_ only.
+func NoDirExists(t TestingT, path string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ info, err := os.Lstat(path)
+ if err != nil {
+ // any error from Lstat (including "does not exist") means no directory at this path
+ return true
+ }
+ if !info.IsDir() {
+ return true
+ }
+ return Fail(t, fmt.Sprintf("directory %q exists", path), msgAndArgs...)
+}
+
+// JSONEq asserts that two JSON strings are equivalent.
+//
+// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
+func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedJSONAsInterface, actualJSONAsInterface interface{}
+
+ if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)
+}
+
+// YAMLEq asserts that two YAML strings are equivalent.
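+//
+// A minimal illustration (hypothetical inputs; key order is irrelevant once parsed):
+//
+// assert.YAMLEq(t, "a: 1\nb: 2", "b: 2\na: 1")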
+func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ var expectedYAMLAsInterface, actualYAMLAsInterface interface{}
+
+ if err := yaml.Unmarshal([]byte(expected), &expectedYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
+ }
+
+ if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
+ return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
+ }
+
+ return Equal(t, expectedYAMLAsInterface, actualYAMLAsInterface, msgAndArgs...)
+}
+
+func typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {
+ t := reflect.TypeOf(v)
+ k := t.Kind()
+
+ if k == reflect.Ptr {
+ t = t.Elem()
+ k = t.Kind()
+ }
+ return t, k
+}
+
+// diff returns a diff of both values as long as both are of the same type and
+// are a struct, map, slice, array or string. Otherwise it returns an empty string.
+func diff(expected interface{}, actual interface{}) string {
+ if expected == nil || actual == nil {
+ return ""
+ }
+
+ et, ek := typeAndKind(expected)
+ at, _ := typeAndKind(actual)
+
+ if et != at {
+ return ""
+ }
+
+ if ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array && ek != reflect.String {
+ return ""
+ }
+
+ var e, a string
+ if et != reflect.TypeOf("") {
+ e = spewConfig.Sdump(expected)
+ a = spewConfig.Sdump(actual)
+ } else {
+ e = reflect.ValueOf(expected).String()
+ a = reflect.ValueOf(actual).String()
+ }
+
+ diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
+ A: difflib.SplitLines(e),
+ B: difflib.SplitLines(a),
+ FromFile: "Expected",
+ FromDate: "",
+ ToFile: "Actual",
+ ToDate: "",
+ Context: 1,
+ })
+
+ return "\n\nDiff:\n" + diff
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
+var spewConfig = spew.ConfigState{
+ Indent: " ",
+ DisablePointerAddresses: true,
+ DisableCapacities: true,
+ SortKeys: true,
+ DisableMethods: true,
+ MaxDepth: 10,
+}
+
+type tHelper interface {
+ Helper()
+}
+
+// Eventually asserts that the given condition will be met within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Eventually(t, func() bool { return true; }, time.Second, 10*time.Millisecond)
+func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return Fail(t, "Condition never satisfied", msgAndArgs...)
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return true
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// Never asserts that the given condition is never satisfied within waitFor time,
+// periodically checking the target function each tick.
+//
+// assert.Never(t, func() bool { return false; }, time.Second, 10*time.Millisecond)
+func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.Duration, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+
+ ch := make(chan bool, 1)
+
+ timer := time.NewTimer(waitFor)
+ defer timer.Stop()
+
+ ticker := time.NewTicker(tick)
+ defer ticker.Stop()
+
+ for tick := ticker.C; ; {
+ select {
+ case <-timer.C:
+ return true
+ case <-tick:
+ tick = nil
+ go func() { ch <- condition() }()
+ case v := <-ch:
+ if v {
+ return Fail(t, "Condition satisfied", msgAndArgs...)
+ }
+ tick = ticker.C
+ }
+ }
+}
+
+// ErrorIs asserts that at least one of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// NotErrorIs asserts that none of the errors in err's chain matches target.
+// This is a wrapper for errors.Is.
+func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if !errors.Is(err, target) {
+ return true
+ }
+
+ var expectedText string
+ if target != nil {
+ expectedText = target.Error()
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
+ "found: %q\n"+
+ "in chain: %s", expectedText, chain,
+ ), msgAndArgs...)
+}
+
+// ErrorAs asserts that at least one of the errors in err's chain matches target, and if so, sets target to that error value.
+// This is a wrapper for errors.As.
+func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ if errors.As(err, target) {
+ return true
+ }
+
+ chain := buildErrorChainString(err)
+
+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
+ "expected: %q\n"+
+ "in chain: %s", target, chain,
+ ), msgAndArgs...)
+}
+
+func buildErrorChainString(err error) string {
+ if err == nil {
+ return ""
+ }
+
+ e := errors.Unwrap(err)
+ chain := fmt.Sprintf("%q", err.Error())
+ for e != nil {
+ chain += fmt.Sprintf("\n\t%q", e.Error())
+ e = errors.Unwrap(e)
+ }
+ return chain
+}
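+
+// For illustration (hypothetical errors), a two-level chain renders as:
+//
+// err := fmt.Errorf("outer: %w", errors.New("inner"))
+// buildErrorChainString(err) // "outer: inner"\n\t"inner"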
diff --git a/vendor/github.com/stretchr/testify/assert/doc.go b/vendor/github.com/stretchr/testify/assert/doc.go
new file mode 100644
index 0000000..c9dccc4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/doc.go
@@ -0,0 +1,45 @@
+// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
+//
+// Example Usage
+//
+// The following is a complete example using assert in a standard test function:
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(t, a, b, "The two words should be the same.")
+//
+// }
+//
+// If you assert many times, use the format below:
+//
+// import (
+// "testing"
+// "github.com/stretchr/testify/assert"
+// )
+//
+// func TestSomething(t *testing.T) {
+// assert := assert.New(t)
+//
+// var a string = "Hello"
+// var b string = "Hello"
+//
+// assert.Equal(a, b, "The two words should be the same.")
+// }
+//
+// Assertions
+//
+// Assertions allow you to easily write test code, and are global funcs in the `assert` package.
+// All assertion functions take, as the first argument, the `*testing.T` object provided by the
+// testing framework. This allows the assertion funcs to write the failings and other details to
+// the correct place.
+//
+// Every assertion function also takes an optional string message as the final argument,
+// allowing custom error messages to be appended to the message the assertion method outputs.
+package assert
diff --git a/vendor/github.com/stretchr/testify/assert/errors.go b/vendor/github.com/stretchr/testify/assert/errors.go
new file mode 100644
index 0000000..ac9dc9d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/errors.go
@@ -0,0 +1,10 @@
+package assert
+
+import (
+ "errors"
+)
+
+// AnError is an error instance useful for testing. If the code does not care
+// about error specifics, and only needs to return the error for example, this
+// error should be used to make the test code more readable.
+var AnError = errors.New("assert.AnError general error for testing")
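+
+// Illustrative sketch (not part of the upstream source): AnError is useful
+// when a test only needs "some error" to flow through the code under test.
+// failingLoader is a hypothetical helper for the example.
+//
+//	func TestLoadPropagatesError(t *testing.T) {
+//	    failingLoader := func() (string, error) { return "", assert.AnError }
+//	    _, err := failingLoader()
+//	    assert.ErrorIs(t, err, assert.AnError)
+//	}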
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
new file mode 100644
index 0000000..df189d2
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -0,0 +1,16 @@
+package assert
+
+// Assertions provides assertion methods around the
+// TestingT interface.
+type Assertions struct {
+ t TestingT
+}
+
+// New makes a new Assertions object for the specified TestingT.
+func New(t TestingT) *Assertions {
+ return &Assertions{
+ t: t,
+ }
+}
+
+//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs"
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
new file mode 100644
index 0000000..4ed341d
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -0,0 +1,162 @@
+package assert
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "strings"
+)
+
+// httpCode is a helper that returns the HTTP status code of the response. It
+// returns -1 and an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url, nil)
+ if err != nil {
+ return -1, err
+ }
+ req.URL.RawQuery = values.Encode()
+ handler(w, req)
+ return w.Code, nil
+}
+
+// HTTPSuccess asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
+}
+
+// HTTPRedirect asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
+}
+
+// HTTPError asserts that a specified handler returns an error status code.
+//
+// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
+}
+
+// HTTPStatusCode asserts that a specified handler returns a specified status code.
+//
+// assert.HTTPStatusCode(t, myHandler, "GET", "/notImplemented", nil, 501)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, statuscode int, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
+ }
+
+ successful := code == statuscode
+ if !successful {
+ Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code))
+ }
+
+ return successful
+}
+
+// HTTPBody is a helper that returns the HTTP body of the response. It returns
+// an empty string if building a new request fails.
+func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
+ w := httptest.NewRecorder()
+ req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
+ if err != nil {
+ return ""
+ }
+ handler(w, req)
+ return w.Body.String()
+}
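+
+// Illustrative sketch (not part of the upstream source): HTTPBody can be
+// used directly when a test needs the raw response body rather than a
+// contains-style assertion. helloHandler is a made-up example handler.
+//
+//	helloHandler := func(w http.ResponseWriter, r *http.Request) {
+//	    fmt.Fprintf(w, "hello %s", r.URL.Query().Get("name"))
+//	}
+//	body := assert.HTTPBody(helloHandler, "GET", "/greet", url.Values{"name": {"gopher"}})
+//	// body == "hello gopher"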
+
+// HTTPBodyContains asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if !contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return contains
+}
+
+// HTTPBodyNotContains asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContains(t, myHandler, "GET", "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}, msgAndArgs ...interface{}) bool {
+ if h, ok := t.(tHelper); ok {
+ h.Helper()
+ }
+ body := HTTPBody(handler, method, url, values)
+
+ contains := strings.Contains(body, fmt.Sprint(str))
+ if contains {
+ Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body))
+ }
+
+ return !contains
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
new file mode 100644
index 0000000..2734907
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitignore
@@ -0,0 +1,15 @@
+*.out
+*.test
+*.xml
+*.swp
+.idea/
+.tmp/
+*.iml
+*.cov
+*.html
+*.log
+gen/thrift/js
+gen/thrift/py
+vendor/
+crossdock-main
+crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
new file mode 100644
index 0000000..295ebcf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "idl"]
+ path = idl
+ url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
new file mode 100644
index 0000000..435aea1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -0,0 +1,56 @@
+sudo: required
+
+language: go
+go_import_path: github.com/uber/jaeger-client-go
+
+dist: trusty
+
+matrix:
+ include:
+ # - go: 1.15.x
+ # env:
+ # - TESTS=true
+ # - USE_DEP=true
+ # - COVERAGE=true
+ - go: 1.15.x
+ env:
+ - USE_DEP=true
+ - CROSSDOCK=true
+ # - go: 1.15.x
+ # env:
+ # - TESTS=true
+ # - USE_DEP=false
+ # - USE_GLIDE=true
+ # test with previous version of Go
+ # - go: 1.14.x
+ # env:
+ # - TESTS=true
+ # - USE_DEP=true
+ # - CI_SKIP_LINT=true
+
+services:
+ - docker
+
+env:
+ global:
+ - DOCKER_COMPOSE_VERSION=1.8.0
+ - COMMIT=${TRAVIS_COMMIT::8}
+ # DOCKER_PASS
+ - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
+ # DOCKER_USER
+ - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
+
+install:
+ - make install-ci USE_DEP=$USE_DEP
+ - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
+
+script:
+ - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
+ - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
+
+after_success:
+ - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
+ - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
+
+after_failure:
+ - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
new file mode 100644
index 0000000..956790e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -0,0 +1,390 @@
+Changes by Version
+==================
+
+2.29.2 (unreleased)
+-------------------
+- Nothing yet.
+
+
+2.29.1 (2021-05-24)
+-------------------
+- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro
+
+
+2.29.0 (2021-05-20)
+-------------------
+- Update vendored thrift to 0.14.1 (#584) -- @nhatthm
+
+
+2.28.0 (2021-04-30)
+-------------------
+- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott
+
+
+2.27.0 (2021-04-19)
+-------------------
+- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. Aidan Campbell
+
+
+2.26.0 (2021-04-16)
+-------------------
+- Delete a baggage item when value is blank (#562) -- evan.kim
+- Trim baggage key when parsing (#566) -- sicong.huang
+- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o
+- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro
+- Additional context protections (#544) -- Joe Elliott
+- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima
+- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro
+- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro
+- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel
+- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro
+
+
+2.25.0 (2020-07-13)
+-------------------
+## Breaking changes
+- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
+
+ The re-resolving of UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
+ The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
+ or via JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
+
+## Bug fixes
+- Do not add invalid context to references (#521) -- Yuri Shkuro
+
+
+2.24.0 (2020-06-14)
+-------------------
+- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
+- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
+- Override reporter config only when agent host/port is set in env (#513) -- ilylia
+- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
+
+
+2.23.1 (2020-04-28)
+-------------------
+- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
+
+
+2.23.0 (2020-04-22)
+-------------------
+
+- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
+- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
+- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
+
+2.22.1 (2020-01-16)
+-------------------
+
+- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
+
+
+2.22.0 (2020-01-15)
+-------------------
+
+- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
+- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
+
+
+2.21.1 (2019-12-20)
+-------------------
+
+- Update version correctly.
+
+
+2.21.0 (2019-12-20)
+-------------------
+
+- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
+- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
+- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
+- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
+
+
+2.20.1 (2019-11-08)
+-------------------
+
+Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
+
+- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
+- Create `OperationNameLateBinding` sampler option and config option
+- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
+
+
+2.20.0 (2019-11-06)
+-------------------
+
+## New Features
+
+- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
+
+ Sampling state is shared between all spans of the trace that are still in memory.
+ This allows implementation of delayed sampling decisions (see below).
+
+- Support delayed sampling decisions (#449) -- Yuri Shkuro
+
+ This is a large structural change to how the samplers work.
+ It allows some samplers to be executed multiple times on different
+ span events (like setting a tag) and make a positive sampling decision
+ later in the span life cycle, or even based on children spans.
+ See [README](./README.md#delayed-sampling) for more details.
+
+ There is a related minor change in behavior of the adaptive (per-operation) sampler,
+ which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
+ operation used to make the sampling decision is always the one provided at span creation.
+
+- Add experimental tag matching sampler (#452) -- Yuri Shkuro
+
+ A sampler that can sample a trace based on a certain tag added to the root
+ span or one of its local (in-process) children. The sampler can be used with
+ another experimental `PrioritySampler` that allows multiple samplers to try
+ to make a sampling decision, in a certain priority order.
+
+- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
+- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
+
+## Minor patches
+
+- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
+- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
+- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
+- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
+
+2.19.0 (2019-09-23)
+-------------------
+
+- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
+
+
+2.18.1 (2019-09-16)
+-------------------
+
+- Remove go.mod / go.sum that interfere with `go get` (#432)
+
+
+2.18.0 (2019-09-09)
+-------------------
+
+- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) <Jun Guo>
+
+
+2.17.0 (2019-08-30)
+-------------------
+
+- Add a flag for firehose mode (#419) <Prithvi Raj>
+- Default sampling server URL to agent (#414) <Bryan Boreham>
+- Update default sampling rate when sampling strategy is refreshed (#413) <Bryan Boreham>
+- Support "Self" Span Reference (#411) <dm03514>
+- Don't complain about blank service name if tracing is Disabled (#410) <Yuri Shkuro>
+- Use IP address from tag if it exists (#402) <NikoKVCS>
+- Expose span data to custom reporters [fixes #394] (#399) <Curtis Allen>
+- Fix the span allocation in the pool (#381) <Dmitry Ponomarev>
+
+
+2.16.0 (2019-03-24)
+-------------------
+
+- Add baggage to B3 codec (#319) <Pavol Loffay>
+- Add support for 128bit trace ids to zipkin thrift spans. (#378) <Douglas Reid>
+- Update zipkin propagation logic to support 128bit traceIDs (#373) <Douglas Reid>
+- Accept "true" for the x-b3-sampled header (#356) <Adrian Bogatu>
+
+- Allow setting of PoolSpans from Config object (#322) <Matthew Pound>
+- Make propagators public to allow wrapping (#379) <Ivan Babrou>
+- Change default metric namespace to use relevant separator for the metric backend (#364) <Gary Brown>
+- Change metrics prefix to jaeger_tracer and add descriptions (#346) <Gary Brown>
+- Bump OpenTracing to ^1.1.x (#383) <Yuri Shkuro>
+- Upgrade jaeger-lib to v2.0.0 (#359) <Gary Brown>
+- Avoid defer when generating random number (#358) <Gary Brown>
+- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) <Gary Brown>
+- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) <Eundoo Song>
+
+
+2.15.0 (2018-10-10)
+-------------------
+
+- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) <Zvi Cahana>
+- Make maximum annotation length configurable in tracer options (#318) <Eric Chang>
+- Support more environment variables in configuration (#323) <Daneyon Hansen>
+- Print error on Sampler Query failure (#328) <Goutham Veeramachaneni>
+- Add an HTTPOption to support custom http.RoundTripper (#333) <Michael Puncel>
+- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) <Michael Puncel>
+
+
+2.14.0 (2018-04-30)
+-------------------
+
+- Support throttling for debug traces (#274) <Isaac Hier>
+- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
+- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
+- Test with Go 1.9 (#298) <Yuri Shkuro>
+
+
+2.13.0 (2018-04-15)
+-------------------
+
+- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
+- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
+- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
+- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
+- Allow overriding sampler in the Config (#270) <Mike Kabischev>
+
+
+2.12.0 (2018-03-14)
+-------------------
+
+- Use lock when retrieving span.Context() (#268)
+- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
+
+
+2.11.2 (2018-01-12)
+-------------------
+
+- Add Gopkg.toml to allow using the lib with `dep`
+
+
+2.11.1 (2018-01-03)
+-------------------
+
+- Do not enqueue spans after Reporter is closed (#235, #245)
+- Change default flush interval to 1sec (#243)
+
+
+2.11.0 (2017-11-27)
+-------------------
+
+- Normalize metric names and tags to be compatible with Prometheus (#222)
+
+
+2.10.0 (2017-11-14)
+-------------------
+
+- Support custom tracing headers (#176)
+- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
+- Do not coerce baggage keys to lower case (#196)
+- Log span name when span cannot be reported (#198)
+- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
+
+
+2.9.0 (2017-07-29)
+------------------
+
+- Pin thrift <= 0.10 (#179)
+- Introduce a parallel interface ContribObserver (#159)
+
+
+2.8.0 (2017-07-05)
+------------------
+
+- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
+- Add options to set tracer tags
+
+
+2.7.0 (2017-06-21)
+------------------
+
+- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
+- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
+- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
+- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
+- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
+
+
+2.6.0 (2017-03-28)
+------------------
+
+- Add config option to initialize RPC Metrics feature
+
+
+2.5.0 (2017-03-23)
+------------------
+
+- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
+- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
+- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
+
+
+2.4.0 (2017-03-21)
+------------------
+
+- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
+- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
+
+
+2.3.0 (2017-03-20)
+------------------
+
+- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
+- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
+
+
+2.2.1 (2017-03-14)
+------------------
+
+- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
+
+
+2.2.0 (2017-03-10)
+------------------
+
+- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
+- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
+
+
+2.1.2 (2017-02-27)
+-------------------
+
+- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
+- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
+- Add tracer initialization godoc examples
+
+
+2.1.1 (2017-02-21)
+-------------------
+
+- Fix inefficient usage of zap.Logger
+
+
+2.1.0 (2017-02-17)
+-------------------
+
+- Add adapter for zap.Logger (https://github.com/uber-go/zap)
+- Move logging API to ./log/ package
+
+
+2.0.0 (2017-02-08)
+-------------------
+
+- Support Adaptive Sampling
+- Support 128bit Trace IDs
+- Change trace/span IDs from uint64 to strong types TraceID and SpanID
+- Add Zipkin HTTP B3 Propagation format support #72
+- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
+- Change API for tracer, reporter, sampler initialization
+
+
+1.6.0 (2016-10-14)
+-------------------
+
+- Add Zipkin HTTP transport
+- Support external baggage via jaeger-baggage header
+- Unpin Thrift version, keep to master
+
+
+1.5.1 (2016-09-27)
+-------------------
+
+- Relax dependency on opentracing to ^1
+
+
+1.5.0 (2016-09-27)
+-------------------
+
+- Upgrade to opentracing-go 1.0
+- Support KV logging for Spans
+
+
+1.4.0 (2016-09-14)
+-------------------
+
+- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
new file mode 100644
index 0000000..0572efc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
@@ -0,0 +1,2 @@
+
+* @jaegertracing/jaeger-maintainers
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
new file mode 100644
index 0000000..41e2154
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
@@ -0,0 +1,170 @@
+# How to Contribute to Jaeger
+
+We'd love your help!
+
+Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
+pull requests. This document outlines some of the conventions on development
+workflow, commit message formatting, contact points and other resources to make
+it easier to get your contribution accepted.
+
+We gratefully welcome improvements to documentation as well as to code.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the [Developer Certificate of
+Origin](https://developercertificate.org/) (DCO). This document was created
+by the Linux Kernel community and is a simple statement that you, as a
+contributor, have the legal right to make the contribution. See the [DCO](DCO)
+file for details.
+
+## Getting Started
+
+This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
+
+To get started, make sure you clone the Git repository into the correct location
+`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
+
+```
+mkdir -p $GOPATH/src/github.com/uber
+cd $GOPATH/src/github.com/uber
+git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
+cd jaeger-client-go
+git submodule update --init --recursive
+```
+
+Then install dependencies and run the tests:
+
+```
+make install
+make test
+```
+
+## Imports grouping
+
+This project follows this pattern for grouping imports in Go files:
+ * imports from standard library
+ * imports from other projects
+ * imports from `jaeger-client-go` project
+
+For example:
+
+```go
+import (
+ "fmt"
+
+ "github.com/uber/jaeger-lib/metrics"
+ "go.uber.org/zap"
+
+ "github.com/uber/jaeger-client-go/config"
+)
+```
+
+## Making A Change
+
+*Before making any significant changes, please [open an
+issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
+changes ahead of time will make the contribution process smooth for everyone.
+
+Once we've discussed your changes and you've got your code ready, make sure
+that tests are passing (`make test` or `make cover`) and open your PR. Your
+pull request is most likely to be accepted if it:
+
+* Includes tests for new functionality.
+* Follows the guidelines in [Effective
+ Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
+ review comments](https://github.com/golang/go/wiki/CodeReviewComments).
+* Has a [good commit message](https://chris.beams.io/posts/git-commit/) (see the example after this list):
+ * Separate subject from body with a blank line
+ * Limit the subject line to 50 characters
+ * Capitalize the subject line
+ * Do not end the subject line with a period
+ * Use the imperative mood in the subject line
+ * Wrap the body at 72 characters
+ * Use the body to explain _what_ and _why_ instead of _how_
+* Each commit must be signed by the author ([see below](#sign-your-work)).
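+
+For instance, a commit message following these guidelines might look like
+this (an illustrative example, not taken from this repository's history):
+
+```
+Add pool for span allocations
+
+Allocating a new Span on every StartSpan call shows up in CPU
+profiles of high-throughput services. Reuse spans through a
+sync.Pool so that the hot path allocates nothing.
+
+Signed-off-by: Joe Smith <joe@gmail.com>
+```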
+
+## License
+
+By contributing your code, you agree to license your contribution under the terms
+of the [Apache License](LICENSE).
+
+If you are adding a new file it should have a header like below. The easiest
+way to add such header is to run `make fmt`.
+
+```
+// Copyright (c) 2017 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+```
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch. The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+ Signed-off-by: Joe Smith <joe@gmail.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+If you want this to be automatic you can set up some aliases:
+
+```
+git config --add alias.amend "commit -s --amend"
+git config --add alias.c "commit -s"
+```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
new file mode 100644
index 0000000..068953d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/DCO
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
new file mode 100644
index 0000000..268289b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -0,0 +1,301 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
+ name = "github.com/HdrHistogram/hdrhistogram-go"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
+ version = "0.9.0"
+
+[[projects]]
+ digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+ name = "github.com/beorn7/perks"
+ packages = ["quantile"]
+ pruneopts = "UT"
+ revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
+ version = "v1.0.1"
+
+[[projects]]
+ branch = "master"
+ digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
+ name = "github.com/crossdock/crossdock-go"
+ packages = [
+ ".",
+ "assert",
+ "require",
+ ]
+ pruneopts = "UT"
+ revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
+
+[[projects]]
+ digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+ name = "github.com/davecgh/go-spew"
+ packages = ["spew"]
+ pruneopts = "UT"
+ revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+ version = "v1.1.1"
+
+[[projects]]
+ digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
+ name = "github.com/golang/mock"
+ packages = ["gomock"]
+ pruneopts = "UT"
+ revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d"
+ version = "v1.4.4"
+
+[[projects]]
+ digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985"
+ name = "github.com/golang/protobuf"
+ packages = [
+ "proto",
+ "ptypes",
+ "ptypes/any",
+ "ptypes/duration",
+ "ptypes/timestamp",
+ ]
+ pruneopts = "UT"
+ revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17"
+ version = "v1.4.2"
+
+[[projects]]
+ digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+ name = "github.com/matttproud/golang_protobuf_extensions"
+ packages = ["pbutil"]
+ pruneopts = "UT"
+ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+ version = "v1.0.1"
+
+[[projects]]
+ digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01"
+ name = "github.com/opentracing/opentracing-go"
+ packages = [
+ ".",
+ "ext",
+ "harness",
+ "log",
+ ]
+ pruneopts = "UT"
+ revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12"
+ version = "v1.2.0"
+
+[[projects]]
+ digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314"
+ name = "github.com/pkg/errors"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "614d223910a179a466c1767a985424175c39b465"
+ version = "v0.9.1"
+
+[[projects]]
+ digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+ name = "github.com/pmezard/go-difflib"
+ packages = ["difflib"]
+ pruneopts = "UT"
+ revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+ version = "v1.0.0"
+
+[[projects]]
+ digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
+ name = "github.com/prometheus/client_golang"
+ packages = [
+ "prometheus",
+ "prometheus/internal",
+ ]
+ pruneopts = "UT"
+ revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
+ version = "v1.1.0"
+
+[[projects]]
+ digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0"
+ name = "github.com/prometheus/client_model"
+ packages = ["go"]
+ pruneopts = "UT"
+ revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9"
+ version = "v0.2.0"
+
+[[projects]]
+ digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a"
+ name = "github.com/prometheus/common"
+ packages = [
+ "expfmt",
+ "internal/bitbucket.org/ww/goautoneg",
+ "model",
+ ]
+ pruneopts = "UT"
+ revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc"
+ version = "v0.14.0"
+
+[[projects]]
+ digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87"
+ name = "github.com/prometheus/procfs"
+ packages = [
+ ".",
+ "internal/fs",
+ "internal/util",
+ ]
+ pruneopts = "UT"
+ revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486"
+ version = "v0.2.0"
+
+[[projects]]
+ digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e"
+ name = "github.com/stretchr/objx"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6"
+ version = "v0.3.0"
+
+[[projects]]
+ digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea"
+ name = "github.com/stretchr/testify"
+ packages = [
+ "assert",
+ "mock",
+ "require",
+ "suite",
+ ]
+ pruneopts = "UT"
+ revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
+ version = "v1.6.1"
+
+[[projects]]
+ digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61"
+ name = "github.com/uber/jaeger-lib"
+ packages = [
+ "metrics",
+ "metrics/metricstest",
+ "metrics/prometheus",
+ ]
+ pruneopts = "UT"
+ revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4"
+ version = "v2.3.0"
+
+[[projects]]
+ digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f"
+ name = "go.uber.org/atomic"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182"
+ version = "v1.7.0"
+
+[[projects]]
+ digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298"
+ name = "go.uber.org/multierr"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "3114a8b704d2d28dbacda34a872690aaef66aeed"
+ version = "v1.6.0"
+
+[[projects]]
+ digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236"
+ name = "go.uber.org/zap"
+ packages = [
+ ".",
+ "buffer",
+ "internal/bufferpool",
+ "internal/color",
+ "internal/exit",
+ "zapcore",
+ "zaptest/observer",
+ ]
+ pruneopts = "UT"
+ revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510"
+ version = "v1.16.0"
+
+[[projects]]
+ branch = "master"
+ digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
+ name = "golang.org/x/net"
+ packages = [
+ "context",
+ "context/ctxhttp",
+ ]
+ pruneopts = "UT"
+ revision = "328152dc79b1547da63f950cd4cdd9afd50b2774"
+
+[[projects]]
+ branch = "master"
+ digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86"
+ name = "golang.org/x/sys"
+ packages = [
+ "internal/unsafeheader",
+ "unix",
+ "windows",
+ ]
+ pruneopts = "UT"
+ revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9"
+
+[[projects]]
+ digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1"
+ name = "google.golang.org/protobuf"
+ packages = [
+ "encoding/prototext",
+ "encoding/protowire",
+ "internal/descfmt",
+ "internal/descopts",
+ "internal/detrand",
+ "internal/encoding/defval",
+ "internal/encoding/messageset",
+ "internal/encoding/tag",
+ "internal/encoding/text",
+ "internal/errors",
+ "internal/fieldsort",
+ "internal/filedesc",
+ "internal/filetype",
+ "internal/flags",
+ "internal/genid",
+ "internal/impl",
+ "internal/mapsort",
+ "internal/pragma",
+ "internal/set",
+ "internal/strs",
+ "internal/version",
+ "proto",
+ "reflect/protoreflect",
+ "reflect/protoregistry",
+ "runtime/protoiface",
+ "runtime/protoimpl",
+ "types/known/anypb",
+ "types/known/durationpb",
+ "types/known/timestamppb",
+ ]
+ pruneopts = "UT"
+ revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1"
+ version = "v1.25.0"
+
+[[projects]]
+ branch = "v3"
+ digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b"
+ name = "gopkg.in/yaml.v3"
+ packages = ["."]
+ pruneopts = "UT"
+ revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = [
+ "github.com/crossdock/crossdock-go",
+ "github.com/golang/mock/gomock",
+ "github.com/opentracing/opentracing-go",
+ "github.com/opentracing/opentracing-go/ext",
+ "github.com/opentracing/opentracing-go/harness",
+ "github.com/opentracing/opentracing-go/log",
+ "github.com/pkg/errors",
+ "github.com/prometheus/client_golang/prometheus",
+ "github.com/stretchr/testify/assert",
+ "github.com/stretchr/testify/mock",
+ "github.com/stretchr/testify/require",
+ "github.com/stretchr/testify/suite",
+ "github.com/uber/jaeger-lib/metrics",
+ "github.com/uber/jaeger-lib/metrics/metricstest",
+ "github.com/uber/jaeger-lib/metrics/prometheus",
+ "go.uber.org/atomic",
+ "go.uber.org/zap",
+ "go.uber.org/zap/zapcore",
+ "go.uber.org/zap/zaptest/observer",
+ ]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
new file mode 100644
index 0000000..3aa307a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -0,0 +1,31 @@
+[[constraint]]
+ name = "github.com/crossdock/crossdock-go"
+ branch = "master"
+
+[[constraint]]
+ name = "github.com/opentracing/opentracing-go"
+ version = "^1.2"
+
+[[constraint]]
+ name = "github.com/prometheus/client_golang"
+ version = "^1"
+
+[[constraint]]
+ name = "github.com/stretchr/testify"
+ version = "^1.1.3"
+
+[[constraint]]
+ name = "go.uber.org/atomic"
+ version = "^1"
+
+[[constraint]]
+ name = "github.com/uber/jaeger-lib"
+ version = "^2.3"
+
+[[constraint]]
+ name = "go.uber.org/zap"
+ version = "^1"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
new file mode 100644
index 0000000..bb7463c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -0,0 +1,149 @@
+PROJECT_ROOT=github.com/uber/jaeger-client-go
+export GO111MODULE=off
+PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+# all .go files that don't exist in hidden directories
+ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
+ -e ".*/\..*" \
+ -e ".*/_.*" \
+ -e ".*/mocks.*")
+
+USE_DEP := true
+
+-include crossdock/rules.mk
+
+RACE=-race
+GOTEST=go test -v $(RACE)
+GOLINT=golint
+GOVET=go vet
+GOFMT=gofmt
+FMT_LOG=fmt.log
+LINT_LOG=lint.log
+
+THRIFT_VER=0.14
+THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
+THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
+THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
+THRIFT_GEN_DIR=thrift-gen
+
+PASS=$(shell printf "\033[32mPASS\033[0m")
+FAIL=$(shell printf "\033[31mFAIL\033[0m")
+COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
+
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test fmt lint
+
+.PHONY: test
+test:
+ifeq ($(USE_DEP),true)
+ dep check
+endif
+ bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
+
+.PHONY: fmt
+fmt:
+ $(GOFMT) -e -s -l -w $(ALL_SRC)
+ ./scripts/updateLicenses.sh
+
+.PHONY: lint
+lint: vet golint lint-fmt lint-thrift-testing
+
+.PHONY: vet
+vet:
+ $(GOVET) $(PACKAGES)
+
+.PHONY: golint
+golint:
+ @cat /dev/null > $(LINT_LOG)
+ @$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
+ @[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
+
+.PHONY: lint-fmt
+lint-fmt:
+ @$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
+ ./scripts/updateLicenses.sh >> $(FMT_LOG)
+ @[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
+
+# make sure thrift/ module does not import "testing"
+.PHONY: lint-thrift-testing
+lint-thrift-testing:
+ @cat /dev/null > $(LINT_LOG)
+ @(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true
+ @[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false)
+
+.PHONY: install
+install:
+ @echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
+ifeq ($(USE_DEP),true)
+ dep version || make install-dep
+ dep ensure -vendor-only -v
+endif
+ifeq ($(USE_GLIDE),true)
+ glide --version || go get github.com/Masterminds/glide
+ glide install
+endif
+
+
+.PHONY: cover
+cover:
+ $(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
+
+.PHONY: cover-html
+cover-html: cover
+ go tool cover -html=cover.out -o cover.html
+
+# This is not part of the regular test target because we don't want to slow it
+# down.
+.PHONY: test-examples
+test-examples:
+ make -C examples
+
+.PHONY: thrift
+thrift: idl-submodule thrift-compile
+
+# TODO at the moment we're not generating tchan_*.go files
+.PHONY: thrift-compile
+thrift-compile: thrift-image
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
+ $(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
+ sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
+ sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
+ sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
+ $(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
+ rm -rf thrift-gen/*/*-remote
+ rm -rf crossdock/thrift/*/*-remote
+ rm -rf thrift-gen/jaeger/collector.go
+
+.PHONY: idl-submodule
+idl-submodule:
+ git submodule init
+ git submodule update
+
+.PHONY: thrift-image
+thrift-image:
+ $(THRIFT) -version
+
+.PHONY: install-dep
+install-dep:
+ - curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
+ - chmod +x $$GOPATH/bin/dep
+
+.PHONY: install-ci
+install-ci: install
+ go get github.com/wadey/gocovmerge
+ go get github.com/mattn/goveralls
+ go get golang.org/x/tools/cmd/cover
+ go get golang.org/x/lint/golint
+
+.PHONY: test-ci
+test-ci: cover
+ifeq ($(CI_SKIP_LINT),true)
+ echo 'skipping lint'
+else
+ make lint
+endif
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
new file mode 100644
index 0000000..687f578
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -0,0 +1,324 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+
+# Jaeger Bindings for Go OpenTracing API
+
+Instrumentation library that implements an
+[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
+
+**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
+ * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
+ * :x: `import "github.com/jaegertracing/jaeger-client-go"`
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Installation
+
+We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
+and [semantic versioning](http://semver.org/) when including this library into an application.
+For example, Jaeger backend imports this library like this:
+
+```toml
+[[constraint]]
+ name = "github.com/uber/jaeger-client-go"
+ version = "2.17"
+```
+
+If you instead want to use the latest version in `master`, you can pull it via `go get`.
+Note that during `go get` you may see build errors due to incompatible dependencies, which is why
+we recommend using semantic versions for dependencies. Such errors may be fixed by running
+`make install` (it will install `dep` if you don't have it):
+
+```shell
+go get -u github.com/uber/jaeger-client-go/
+cd $GOPATH/src/github.com/uber/jaeger-client-go/
+git submodule update --init --recursive
+make install
+```
+
+## Initialization
+
+See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+and [config/example_test.go](./config/example_test.go).
+
+### Environment variables
+
+The tracer can be initialized with values coming from environment variables, if it is
+[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
+that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
+None of the env vars are required and all of them can be overridden via direct setting
+of the property on the configuration object.
+
+Property| Description
+--- | ---
+JAEGER_SERVICE_NAME | The service name.
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, e.g. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
+JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When `true`, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
+JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
+JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
+JAEGER_SAMPLER_PARAM | The sampler parameter (number).
+JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
+JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
+JAEGER_TAGS | A comma separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
+JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
+
+By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
+`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
+to the endpoint via `HTTP`, leaving `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` unused. If `JAEGER_ENDPOINT` is
+secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
+variables.
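+
+A minimal sketch of initializing the tracer purely from these variables follows;
+the `log.Fatalf` error handling and the global-tracer registration are
+illustrative, not required by the library:
+
+```go
+package main
+
+import (
+	"log"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
+)
+
+func main() {
+	// Reads the JAEGER_* environment variables described above.
+	cfg, err := config.FromEnv()
+	if err != nil {
+		log.Fatalf("could not parse Jaeger env vars: %v", err)
+	}
+	tracer, closer, err := cfg.NewTracer()
+	if err != nil {
+		log.Fatalf("could not initialize tracer: %v", err)
+	}
+	defer closer.Close()
+	opentracing.SetGlobalTracer(tracer)
+}
+```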
+
+### Closing the tracer via `io.Closer`
+
+The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
+It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
+before exiting, e.g.
+
+```go
+tracer, closer, err := cfg.NewTracer(...)
+// handle err before using the closer
+defer closer.Close()
+```
+
+This is especially useful for command-line tools that enable tracing, as well as
+for long-running apps that support graceful shutdown. For example, if your deployment
+system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
+exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
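+
+A sketch of that pattern; the signal handling shown is illustrative rather than
+prescribed by this library:
+
+```go
+import (
+	"os"
+	"os/signal"
+	"syscall"
+)
+
+// Inside main(), after `defer closer.Close()`:
+sigCh := make(chan os.Signal, 1)
+signal.Notify(sigCh, syscall.SIGTERM)
+<-sigCh // block until asked to stop; returning from main runs the deferred Close()
+```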
+
+### Metrics & Monitoring
+
+The tracer emits a number of different metrics, defined in
+[metrics.go](metrics.go). The monitoring backend is expected to support
+tag-based metric names, e.g. instead of `statsd`-style string names
+like `counters.my-service.jaeger.spans.started.sampled`, the metrics
+are defined by a short name and a collection of key/value tags, for
+example: `name:jaeger.traces, state:started, sampled:y`. See the [metrics.go](./metrics.go)
+file for the full list and descriptions of emitted metrics.
+
+The monitoring backend is represented by the `metrics.Factory` interface from package
+[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics). An implementation
+of that interface can be passed as an option to either the Configuration object or the Tracer
+constructor, for example:
+
+```go
+import (
+ "github.com/uber/jaeger-client-go/config"
+ "github.com/uber/jaeger-lib/metrics/prometheus"
+)
+
+ metricsFactory := prometheus.New()
+ tracer, closer, err := config.Configuration{
+ ServiceName: "your-service-name",
+ }.NewTracer(
+ config.Metrics(metricsFactory),
+ )
+```
+
+By default, a no-op `metrics.NullFactory` is used.
+
+### Logging
+
+The tracer can be configured with an optional logger, which will be
+used to log communication errors, or log spans if a logging reporter
+option is specified in the configuration. The logging API is abstracted
+by the [Logger](logger.go) interface. A logger instance implementing
+this interface can be passed in via the `config.Logger` option when
+creating the tracer, as shown in the sketch below.
+
+Besides the [zap](https://github.com/uber-go/zap) implementation
+bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
+one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
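+
+As a sketch, a logger can be supplied through the `config.Logger` option; the
+`jaeger.StdLogger` used here is the standard-library-backed logger bundled with
+this package, and the service name is illustrative:
+
+```go
+import (
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/config"
+)
+
+tracer, closer, err := config.Configuration{
+	ServiceName: "your-service-name",
+}.NewTracer(
+	config.Logger(jaeger.StdLogger), // logs communication errors via the standard library
+)
+```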
+
+## Instrumentation for Tracing
+
+Since this tracer is fully compliant with OpenTracing API 1.0,
+all code instrumentation should only use the API itself, as described
+in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
+
+## Features
+
+### Reporters
+
+A "reporter" is a component that receives the finished spans and reports
+them to somewhere. Under normal circumstances, the Tracer
+should use the default `RemoteReporter`, which sends the spans out of
+process via configurable "transport". For testing purposes, one can
+use an `InMemoryReporter` that accumulates spans in a buffer and
+allows to retrieve them for later verification. Also available are
+`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
+which logs all finished spans using their `String()` method, and a
+`CompositeReporter` that can be used to combine more than one reporter
+into one, e.g. to attach a logging reporter to the main remote reporter.
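+
+As a sketch, a test can swap in the in-memory reporter via the `Reporter`
+configuration option (the service name is illustrative):
+
+```go
+import (
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/config"
+)
+
+reporter := jaeger.NewInMemoryReporter()
+tracer, closer, err := config.Configuration{
+	ServiceName: "test-service",
+}.NewTracer(
+	config.Reporter(reporter), // keep spans in memory instead of sending them out of process
+)
+// ... exercise instrumented code, then inspect the finished spans:
+spans := reporter.GetSpans()
+```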
+
+### Span Reporting Transports
+
+The remote reporter uses "transports" to actually send the spans out
+of process. Currently the supported transports include:
+ * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
+ * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
+
+### Sampling
+
+The tracer does not record all spans, but only those that have the
+sampling bit set in the `flags`. When a new trace is started and a new
+unique ID is generated, a sampling decision is made whether this trace
+should be sampled. The sampling decision is propagated to all downstream
+calls via the `flags` field of the trace context. The following samplers
+are available:
+ 1. `RemotelyControlledSampler` uses one of the other simpler samplers
+ and periodically updates it by polling an external server. This
+ allows dynamic control of the sampling strategies.
+ 1. `ConstSampler` always makes the same sampling decision for all
+ trace IDs. It can be configured to either sample all traces, or
+ to sample none.
+ 1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
+ for a given trace to be sampled. The actual decision is made by
+ comparing the trace ID with a random number multiplied by the
+ sampling rate.
+ 1. `RateLimitingSampler` can be used to allow only a certain fixed
+ number of traces to be sampled per second.
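+
+For example, a minimal sketch of selecting the `const` sampler through
+`config.SamplerConfig` (the service name is illustrative):
+
+```go
+cfg := config.Configuration{
+	ServiceName: "your-service-name",
+	Sampler: &config.SamplerConfig{
+		Type:  "const", // jaeger.SamplerTypeConst
+		Param: 1,       // 1 samples every trace, 0 samples none
+	},
+}
+tracer, closer, err := cfg.NewTracer()
+```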
+
+#### Delayed sampling
+
+Version 2.20 introduced the ability to delay sampling decisions in the life cycle
+of the root span. It involves several features and architectural changes:
+ * **Shared sampling state**: the sampling state is shared across all local
+ (i.e. in-process) spans for a given trace.
+ * **New `SamplerV2` API** allows the sampler to be called at multiple points
+ in the life cycle of a span:
+ * on span creation
+ * on overwriting span operation name
+ * on setting span tags
+ * on finishing the span
+ * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
+ to indicate if the negative sampling decision is final or not (positive sampling
+ decisions are always final). If the decision is not final, the sampler will be
+ called again on further span life cycle events, like setting tags.
+
+These new features are used in the experimental `x.TagMatchingSampler`, which
+can sample a trace based on a certain tag added to the root
+span or one of its local (in-process) children. The sampler can be used with
+another experimental `x.PrioritySampler` that allows multiple samplers to try
+to make a sampling decision, in a certain priority order.
+
+### Baggage Injection
+
+The OpenTracing spec allows for [baggage][baggage], which are key-value pairs that are added
+to the span context and propagated throughout the trace. An external process can inject baggage
+by setting the special HTTP Header `jaeger-baggage` on a request:
+
+```sh
+curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
+```
+
+Baggage can also be set programmatically inside your service:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ span.SetBaggageItem("key", "value")
+}
+```
+
+Another service downstream of that can retrieve the baggage in a similar way:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+ val := span.BaggageItem("key")
+ println(val)
+}
+```
+
+### Debug Traces (Forced Sampling)
+
+#### Programmatically
+
+The OpenTracing API defines a `sampling.priority` standard tag that
+can be used to affect the sampling of a span and its children:
+
+```go
+import (
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+)
+
+span := opentracing.SpanFromContext(ctx)
+ext.SamplingPriority.Set(span, 1)
+```
+
+#### Via HTTP Headers
+
+Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
+which can be set in the incoming request, e.g.
+
+```sh
+curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
+```
+
+When Jaeger sees this header in the request that otherwise has no
+tracing context, it ensures that the new trace started for this
+request will be sampled in the "debug" mode (meaning it should survive
+all downsampling that might happen in the collection pipeline), and the
+root span will have a tag as if this statement was executed:
+
+```go
+span.SetTag("jaeger-debug-id", "some-correlation-id")
+```
+
+This allows using Jaeger UI to find the trace by this tag.
+
+### Zipkin HTTP B3 compatible header propagation
+
+Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
+by many Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin).
+
+However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
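+
+Based on the linked README, a sketch of registering the B3 propagator for HTTP
+headers might look like this (`NewZipkinB3HTTPHeaderPropagator` comes from the
+`zipkin` subpackage; the service name is illustrative):
+
+```go
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/config"
+	"github.com/uber/jaeger-client-go/zipkin"
+)
+
+propagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
+tracer, closer, err := config.Configuration{
+	ServiceName: "your-service-name",
+}.NewTracer(
+	// Use B3 headers for both injecting and extracting trace context over HTTP.
+	config.Injector(opentracing.HTTPHeaders, propagator),
+	config.Extractor(opentracing.HTTPHeaders, propagator),
+)
+```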
+
+## SelfRef
+
+Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
+to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
+but not yet accepted. This allows the caller to provide an already created `SpanContext`
+when starting a new span. The `Self` reference bypasses trace and span id generation,
+as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
+set appropriately by the caller).
+
+The `Self` reference supports the following use cases:
+ * the ability to provide externally generated trace and span IDs
+ * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
+
+Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type:
+```go
+span := tracer.StartSpan(
+ "continued_span",
+ jaeger.SelfRef(yourSpanContext),
+)
+...
+defer span.Finish()
+```
+
+## License
+
+[Apache 2.0 License](LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
+[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
+[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
+[ot-url]: http://opentracing.io
+[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
+[timeunits]: https://golang.org/pkg/time/#ParseDuration
+[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
new file mode 100644
index 0000000..12438d8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -0,0 +1,12 @@
+# Release Process
+
+1. Create a PR "Preparing for release X.Y.Z" against master branch
+ * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
+ * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
+ * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
+2. Create a release "Release X.Y.Z" on Github
+ * Create Tag `vX.Y.Z`
+ * Copy CHANGELOG.md into the release notes
+3. Create a PR "Back to development" against master branch
+ * Add `<next_version> (unreleased)` to CHANGELOG.md
+ * Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
new file mode 100644
index 0000000..1037ca0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go/log"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+)
+
+// baggageSetter is an actor that can set a baggage value on a Span given certain
+// restrictions (e.g. maxValueLength).
+type baggageSetter struct {
+ restrictionManager baggage.RestrictionManager
+ metrics *Metrics
+}
+
+func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
+ return &baggageSetter{
+ restrictionManager: restrictionManager,
+ metrics: metrics,
+ }
+}
+
+// (NB) span should hold the lock before making this call
+func (s *baggageSetter) setBaggage(span *Span, key, value string) {
+ var truncated bool
+ var prevItem string
+ restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
+ if !restriction.KeyAllowed() {
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ s.metrics.BaggageUpdateFailure.Inc(1)
+ return
+ }
+ if len(value) > restriction.MaxValueLength() {
+ truncated = true
+ value = value[:restriction.MaxValueLength()]
+ s.metrics.BaggageTruncate.Inc(1)
+ }
+ prevItem = span.context.baggage[key]
+ s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+ span.context = span.context.WithBaggageItem(key, value)
+ s.metrics.BaggageUpdateSuccess.Inc(1)
+}
+
+func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
+ if !span.context.IsSampled() {
+ return
+ }
+ fields := []log.Field{
+ log.String("event", "baggage"),
+ log.String("key", key),
+ log.String("value", value),
+ }
+ if prevItem != "" {
+ fields = append(fields, log.String("override", "true"))
+ }
+ if truncated {
+ fields = append(fields, log.String("truncated", "true"))
+ }
+ if !valid {
+ fields = append(fields, log.String("invalid", "true"))
+ }
+ span.logFieldsNoLocking(fields...)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
new file mode 100644
index 0000000..c2222f1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/uber/jaeger-client-go/utils"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/internal/baggage/remote"
+ throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
+ "github.com/uber/jaeger-client-go/rpcmetrics"
+ "github.com/uber/jaeger-client-go/transport"
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const defaultSamplingProbability = 0.001
+
+// Configuration configures and creates Jaeger Tracer
+type Configuration struct {
+ // ServiceName specifies the service name to use on the tracer.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
+ ServiceName string `yaml:"serviceName"`
+
+ // Disabled makes the config return opentracing.NoopTracer.
+ // Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED.
+ Disabled bool `yaml:"disabled"`
+
+ // RPCMetrics enables generation of RPC metrics (requires a metrics factory to be provided).
+ // Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
+ RPCMetrics bool `yaml:"rpc_metrics"`
+
+ // Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context.
+ // Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT.
+ Gen128Bit bool `yaml:"traceid_128bit"`
+
+ // Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
+ Tags []opentracing.Tag `yaml:"tags"`
+
+ Sampler *SamplerConfig `yaml:"sampler"`
+ Reporter *ReporterConfig `yaml:"reporter"`
+ Headers *jaeger.HeadersConfig `yaml:"headers"`
+ BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
+ Throttler *ThrottlerConfig `yaml:"throttler"`
+}
+
+// SamplerConfig allows initializing a non-default sampler. All fields are optional.
+type SamplerConfig struct {
+ // Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
+ Type string `yaml:"type"`
+
+ // Param is a value passed to the sampler.
+ // Valid values for Param field are:
+ // - for "const" sampler, 0 or 1 for always false/true respectively
+ // - for "probabilistic" sampler, a probability between 0 and 1
+ // - for "rateLimiting" sampler, the number of spans per second
+ // - for "remote" sampler, param is the same as for "probabilistic"
+ // and indicates the initial sampling rate before the actual one
+ // is received from the mothership.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
+ Param float64 `yaml:"param"`
+
+ // SamplingServerURL is the URL of sampling manager that can provide
+ // sampling strategy to this service.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
+ SamplingServerURL string `yaml:"samplingServerURL"`
+
+ // SamplingRefreshInterval controls how often the remotely controlled sampler will poll
+ // sampling manager for the appropriate sampling strategy.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+ SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+
+ // MaxOperations is the maximum number of operations that the PerOperationSampler
+ // will keep track of. If an operation is not tracked, a default probabilistic
+ // sampler will be used rather than the per operation specific sampler.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
+ MaxOperations int `yaml:"maxOperations"`
+
+ // Opt-in feature for applications that require late binding of span name via explicit
+ // call to SetOperationName when using PerOperationSampler. When this feature is enabled,
+ // the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
+ // decision as non-final (and the span as writeable). This may lead to degraded performance
+ // in applications that always provide the correct span name on trace creation.
+ //
+ // For backwards compatibility this option is off by default.
+ OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
+
+ // Options can be used to programmatically pass additional options to the Remote sampler.
+ Options []jaeger.SamplerOption
+}
+
+// ReporterConfig configures the reporter. All fields are optional.
+type ReporterConfig struct {
+ // QueueSize controls how many spans the reporter can keep in memory before it starts dropping
+ // new spans. The queue is continuously drained by a background goroutine, as fast as spans
+ // can be sent out of process.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+ QueueSize int `yaml:"queueSize"`
+
+ // BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
+ // It is generally not useful, as it only matters for very low traffic services.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+ BufferFlushInterval time.Duration
+
+ // LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
+ // and logs all submitted spans. Main Configuration.Logger must be initialized in the code
+ // for this option to have any effect.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
+ LogSpans bool `yaml:"logSpans"`
+
+ // LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+ LocalAgentHostPort string `yaml:"localAgentHostPort"`
+
+ // DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves
+ // the agent's hostname and reconnects if there was a change. This option only
+ // applies if LocalAgentHostPort is specified.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
+ DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
+
+ // AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
+ // in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
+ AttemptReconnectInterval time.Duration
+
+ // CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
+ CollectorEndpoint string `yaml:"collectorEndpoint"`
+
+ // User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_USER
+ User string `yaml:"user"`
+
+ // Password instructs reporter to include a password for basic http authentication when sending spans to
+ // jaeger-collector.
+ // Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
+ Password string `yaml:"password"`
+
+ // HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
+ // This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
+ HTTPHeaders map[string]string `yaml:"http_headers"`
+}
+
+// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
+// certain baggage keys. All fields are optional.
+type BaggageRestrictionsConfig struct {
+ // DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
+ // manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
+ // been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
+ // restrictions have been retrieved from jaeger-agent.
+ DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
+
+ // HostPort is the hostPort of jaeger-agent's baggage restrictions server
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the baggage restriction manager will poll
+ // jaeger-agent for the most recent baggage restrictions.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+}
+
+// ThrottlerConfig configures the throttler which can be used to throttle the
+// rate at which the client may send debug requests.
+type ThrottlerConfig struct {
+ // HostPort of jaeger-agent's credit server.
+ HostPort string `yaml:"hostPort"`
+
+ // RefreshInterval controls how often the throttler will poll jaeger-agent
+ // for more throttling credits.
+ RefreshInterval time.Duration `yaml:"refreshInterval"`
+
+ // SynchronousInitialization determines whether or not the throttler should
+ // synchronously fetch credits from the agent when an operation is seen for
+ // the first time. This should be set to true if the client will be used by
+ // a short-lived service that needs to ensure that credits are fetched
+ // upfront such that sampling or throttling occurs.
+ SynchronousInitialization bool `yaml:"synchronousInitialization"`
+}
+
+type nullCloser struct{}
+
+func (*nullCloser) Close() error { return nil }
+
+// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
+// before shutdown.
+//
+// Deprecated: use NewTracer() function
+func (c Configuration) New(
+ serviceName string,
+ options ...Option,
+) (opentracing.Tracer, io.Closer, error) {
+ if serviceName != "" {
+ c.ServiceName = serviceName
+ }
+
+ return c.NewTracer(options...)
+}
+
+// NewTracer returns a new tracer based on the current configuration, using the given options,
+// and a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
+ if c.Disabled {
+ return &opentracing.NoopTracer{}, &nullCloser{}, nil
+ }
+
+ if c.ServiceName == "" {
+ return nil, nil, errors.New("no service name provided")
+ }
+
+ opts := applyOptions(options...)
+ tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
+ if c.RPCMetrics {
+ Observer(
+ rpcmetrics.NewObserver(
+ opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
+ rpcmetrics.DefaultNameNormalizer,
+ ),
+ )(&opts) // adds the observer to opts.observers
+ }
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{
+ Type: jaeger.SamplerTypeRemote,
+ Param: defaultSamplingProbability,
+ }
+ }
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ sampler := opts.sampler
+ if sampler == nil {
+ s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
+ if err != nil {
+ return nil, nil, err
+ }
+ sampler = s
+ }
+
+ reporter := opts.reporter
+ if reporter == nil {
+ r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
+ if err != nil {
+ return nil, nil, err
+ }
+ reporter = r
+ }
+
+ tracerOptions := []jaeger.TracerOption{
+ jaeger.TracerOptions.Metrics(tracerMetrics),
+ jaeger.TracerOptions.Logger(opts.logger),
+ jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
+ jaeger.TracerOptions.PoolSpans(opts.poolSpans),
+ jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
+ jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
+ jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
+ }
+
+ if c.Gen128Bit || opts.gen128Bit {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true))
+ }
+
+ if opts.randomNumber != nil {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber))
+ }
+
+ for _, tag := range opts.tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, tag := range c.Tags {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+ }
+
+ for _, obs := range opts.observers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
+ }
+
+ for _, cobs := range opts.contribObservers {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
+ }
+
+ for format, injector := range opts.injectors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
+ }
+
+ for format, extractor := range opts.extractors {
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
+ }
+
+ if c.BaggageRestrictions != nil {
+ mgr := remote.NewRestrictionManager(
+ c.ServiceName,
+ remote.Options.Metrics(tracerMetrics),
+ remote.Options.Logger(opts.logger),
+ remote.Options.HostPort(c.BaggageRestrictions.HostPort),
+ remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
+ remote.Options.DenyBaggageOnInitializationFailure(
+ c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
+ ),
+ )
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
+ }
+
+ if c.Throttler != nil {
+ debugThrottler := throttler.NewThrottler(
+ c.ServiceName,
+ throttler.Options.Metrics(tracerMetrics),
+ throttler.Options.Logger(opts.logger),
+ throttler.Options.HostPort(c.Throttler.HostPort),
+ throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
+ throttler.Options.SynchronousInitialization(
+ c.Throttler.SynchronousInitialization,
+ ),
+ )
+
+ tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
+ }
+
+ tracer, closer := jaeger.NewTracer(
+ c.ServiceName,
+ sampler,
+ reporter,
+ tracerOptions...,
+ )
+
+ return tracer, closer, nil
+}
+
+// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
+// It returns a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) InitGlobalTracer(
+ serviceName string,
+ options ...Option,
+) (io.Closer, error) {
+ if c.Disabled {
+ return &nullCloser{}, nil
+ }
+ tracer, closer, err := c.New(serviceName, options...)
+ if err != nil {
+ return nil, err
+ }
+ opentracing.SetGlobalTracer(tracer)
+ return closer, nil
+}
+
+// NewSampler creates a new sampler based on the configuration
+func (sc *SamplerConfig) NewSampler(
+ serviceName string,
+ metrics *jaeger.Metrics,
+) (jaeger.Sampler, error) {
+ samplerType := strings.ToLower(sc.Type)
+ if samplerType == jaeger.SamplerTypeConst {
+ return jaeger.NewConstSampler(sc.Param != 0), nil
+ }
+ if samplerType == jaeger.SamplerTypeProbabilistic {
+ if sc.Param >= 0 && sc.Param <= 1.0 {
+ return jaeger.NewProbabilisticSampler(sc.Param)
+ }
+ return nil, fmt.Errorf(
+ "invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
+ sc.Param,
+ )
+ }
+ if samplerType == jaeger.SamplerTypeRateLimiting {
+ return jaeger.NewRateLimitingSampler(sc.Param), nil
+ }
+ if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
+ sc2 := *sc
+ sc2.Type = jaeger.SamplerTypeProbabilistic
+ initSampler, err := sc2.NewSampler(serviceName, nil)
+ if err != nil {
+ return nil, err
+ }
+ options := []jaeger.SamplerOption{
+ jaeger.SamplerOptions.Metrics(metrics),
+ jaeger.SamplerOptions.InitialSampler(initSampler),
+ jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
+ jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
+ jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
+ jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
+ }
+ options = append(options, sc.Options...)
+ return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
+ }
+ return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
+}
+
+// NewReporter instantiates a new reporter that submits spans to the collector
+func (rc *ReporterConfig) NewReporter(
+ serviceName string,
+ metrics *jaeger.Metrics,
+ logger jaeger.Logger,
+) (jaeger.Reporter, error) {
+ sender, err := rc.newTransport(logger)
+ if err != nil {
+ return nil, err
+ }
+ reporter := jaeger.NewRemoteReporter(
+ sender,
+ jaeger.ReporterOptions.QueueSize(rc.QueueSize),
+ jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
+ jaeger.ReporterOptions.Logger(logger),
+ jaeger.ReporterOptions.Metrics(metrics))
+ if rc.LogSpans && logger != nil {
+ logger.Infof("Initializing logging reporter\n")
+ reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
+ }
+ return reporter, err
+}
+
+func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
+ switch {
+ case rc.CollectorEndpoint != "":
+ httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)}
+ if rc.User != "" && rc.Password != "" {
+ httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
+ }
+ return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
+ default:
+ return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
+ AgentClientUDPParams: utils.AgentClientUDPParams{
+ HostPort: rc.LocalAgentHostPort,
+ Logger: logger,
+ DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
+ AttemptReconnectInterval: rc.AttemptReconnectInterval,
+ },
+ })
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
new file mode 100644
index 0000000..0fc3c53
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -0,0 +1,268 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/pkg/errors"
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ // environment variable names
+ envServiceName = "JAEGER_SERVICE_NAME"
+ envDisabled = "JAEGER_DISABLED"
+ envRPCMetrics = "JAEGER_RPC_METRICS"
+ envTags = "JAEGER_TAGS"
+ envSamplerType = "JAEGER_SAMPLER_TYPE"
+ envSamplerParam = "JAEGER_SAMPLER_PARAM"
+ envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
+ envSamplingEndpoint = "JAEGER_SAMPLING_ENDPOINT"
+ envSamplerMaxOperations = "JAEGER_SAMPLER_MAX_OPERATIONS"
+ envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL"
+ envReporterMaxQueueSize = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
+ envReporterFlushInterval = "JAEGER_REPORTER_FLUSH_INTERVAL"
+ envReporterLogSpans = "JAEGER_REPORTER_LOG_SPANS"
+ envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
+ envReporterAttemptReconnectInterval = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
+ envEndpoint = "JAEGER_ENDPOINT"
+ envUser = "JAEGER_USER"
+ envPassword = "JAEGER_PASSWORD"
+ envAgentHost = "JAEGER_AGENT_HOST"
+ envAgentPort = "JAEGER_AGENT_PORT"
+ env128bit = "JAEGER_TRACEID_128BIT"
+)
+
+// FromEnv uses environment variables to set the tracer's Configuration
+func FromEnv() (*Configuration, error) {
+ c := &Configuration{}
+ return c.FromEnv()
+}
+
+// FromEnv uses environment variables and overrides the existing tracer's Configuration
+func (c *Configuration) FromEnv() (*Configuration, error) {
+ if e := os.Getenv(envServiceName); e != "" {
+ c.ServiceName = e
+ }
+
+ if e := os.Getenv(envRPCMetrics); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.RPCMetrics = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
+ }
+ }
+
+ if e := os.Getenv(envDisabled); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.Disabled = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
+ }
+ }
+
+ if e := os.Getenv(envTags); e != "" {
+ c.Tags = parseTags(e)
+ }
+
+ if e := os.Getenv(env128bit); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ c.Gen128Bit = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e)
+ }
+ }
+
+ if c.Sampler == nil {
+ c.Sampler = &SamplerConfig{}
+ }
+
+ if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
+ c.Sampler = s
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain sampler config from env")
+ }
+
+ if c.Reporter == nil {
+ c.Reporter = &ReporterConfig{}
+ }
+
+ if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
+ c.Reporter = r
+ } else {
+ return nil, errors.Wrap(err, "cannot obtain reporter config from env")
+ }
+
+ return c, nil
+}
+
+// samplerConfigFromEnv updates the receiver SamplerConfig from environment variables and returns it
+func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
+ if e := os.Getenv(envSamplerType); e != "" {
+ sc.Type = e
+ }
+
+ if e := os.Getenv(envSamplerParam); e != "" {
+ if value, err := strconv.ParseFloat(e, 64); err == nil {
+ sc.Param = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplingEndpoint); e != "" {
+ sc.SamplingServerURL = e
+ } else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+ sc.SamplingServerURL = e
+ } else if e := os.Getenv(envAgentHost); e != "" {
+ // Fallback if we know the agent host - try the sampling endpoint there
+ sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
+ }
+
+ if e := os.Getenv(envSamplerMaxOperations); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ sc.MaxOperations = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
+ }
+ }
+
+ if e := os.Getenv(envSamplerRefreshInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ sc.SamplingRefreshInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
+ }
+ }
+
+ return sc, nil
+}
+
+// reporterConfigFromEnv updates the receiver ReporterConfig from environment variables and returns it
+func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
+ if e := os.Getenv(envReporterMaxQueueSize); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ rc.QueueSize = int(value)
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterFlushInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ rc.BufferFlushInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
+ }
+ }
+
+ if e := os.Getenv(envReporterLogSpans); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ rc.LogSpans = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
+ }
+ }
+
+ if e := os.Getenv(envEndpoint); e != "" {
+ u, err := url.ParseRequestURI(e)
+ if err != nil {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
+ }
+ rc.CollectorEndpoint = u.String()
+ user := os.Getenv(envUser)
+ pswd := os.Getenv(envPassword)
+ if user != "" && pswd == "" || user == "" && pswd != "" {
+ return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
+ }
+ rc.User = user
+ rc.Password = pswd
+ } else {
+ useEnv := false
+ host := jaeger.DefaultUDPSpanServerHost
+ if e := os.Getenv(envAgentHost); e != "" {
+ host = e
+ useEnv = true
+ }
+
+ port := jaeger.DefaultUDPSpanServerPort
+ if e := os.Getenv(envAgentPort); e != "" {
+ if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+ port = int(value)
+ useEnv = true
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
+ }
+ }
+ if useEnv || rc.LocalAgentHostPort == "" {
+ rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+ }
+
+ if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
+ if value, err := strconv.ParseBool(e); err == nil {
+ rc.DisableAttemptReconnecting = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
+ }
+ }
+
+ if !rc.DisableAttemptReconnecting {
+ if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
+ if value, err := time.ParseDuration(e); err == nil {
+ rc.AttemptReconnectInterval = value
+ } else {
+ return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
+ }
+ }
+ }
+ }
+
+ return rc, nil
+}
+
+// parseTags parses the given string into a collection of Tags.
+// Spec for this value:
+// - comma separated list of key=value
+// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
+// is an environment variable and `defaultValue` is the value to use in case the env var is not set
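+// For example, "pod=web-1,region=${REGION:us-east-1}" yields the tag pod=web-1
+// plus a region tag set to $REGION, or to "us-east-1" if that variable is unset.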
+func parseTags(sTags string) []opentracing.Tag {
+ pairs := strings.Split(sTags, ",")
+ tags := make([]opentracing.Tag, 0)
+ for _, p := range pairs {
+ kv := strings.SplitN(p, "=", 2)
+ k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
+
+ if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
+ ed := strings.SplitN(v[2:len(v)-1], ":", 2)
+ e, d := ed[0], ed[1]
+ v = os.Getenv(e)
+ if v == "" && d != "" {
+ v = d
+ }
+ }
+
+ tag := opentracing.Tag{Key: k, Value: v}
+ tags = append(tags, tag)
+ }
+
+ return tags
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
new file mode 100644
index 0000000..a2b9cbc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+ "github.com/uber/jaeger-lib/metrics"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+// Option is a function that sets some option on the client.
+type Option func(c *Options)
+
+// Options control behavior of the client.
+type Options struct {
+ metrics metrics.Factory
+ logger jaeger.Logger
+ reporter jaeger.Reporter
+ sampler jaeger.Sampler
+ contribObservers []jaeger.ContribObserver
+ observers []jaeger.Observer
+ gen128Bit bool
+ poolSpans bool
+ zipkinSharedRPCSpan bool
+ maxTagValueLength int
+ noDebugFlagOnForcedSampling bool
+ tags []opentracing.Tag
+ injectors map[interface{}]jaeger.Injector
+ extractors map[interface{}]jaeger.Extractor
+ randomNumber func() uint64
+}
+
+// Metrics creates an Option that initializes Metrics in the tracer,
+// which is used to emit statistics about spans.
+func Metrics(factory metrics.Factory) Option {
+ return func(c *Options) {
+ c.metrics = factory
+ }
+}
+
+// Logger can be provided to log Reporter errors, as well as to log spans
+// if Reporter.LogSpans is set to true.
+func Logger(logger jaeger.Logger) Option {
+ return func(c *Options) {
+ c.logger = logger
+ }
+}
+
+// Reporter can be provided explicitly to override the configuration.
+// Useful for testing, e.g. by passing InMemoryReporter.
+func Reporter(reporter jaeger.Reporter) Option {
+ return func(c *Options) {
+ c.reporter = reporter
+ }
+}
+
+// Sampler can be provided explicitly to override the configuration.
+func Sampler(sampler jaeger.Sampler) Option {
+ return func(c *Options) {
+ c.sampler = sampler
+ }
+}
+
+// Observer can be registered with the Tracer to receive notifications about new Spans.
+func Observer(observer jaeger.Observer) Option {
+ return func(c *Options) {
+ c.observers = append(c.observers, observer)
+ }
+}
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new spans.
+func ContribObserver(observer jaeger.ContribObserver) Option {
+ return func(c *Options) {
+ c.contribObservers = append(c.contribObservers, observer)
+ }
+}
+
+// Gen128Bit specifies whether to generate 128bit trace IDs.
+func Gen128Bit(gen128Bit bool) Option {
+ return func(c *Options) {
+ c.gen128Bit = gen128Bit
+ }
+}
+
+// PoolSpans specifies whether to pool spans
+func PoolSpans(poolSpans bool) Option {
+ return func(c *Options) {
+ c.poolSpans = poolSpans
+ }
+}
+
+// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
+// and server spans a la zipkin. If false, client and server spans will be assigned
+// different IDs.
+func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
+ return func(c *Options) {
+ c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+// MaxTagValueLength can be provided to override the default max tag value length.
+func MaxTagValueLength(maxTagValueLength int) Option {
+ return func(c *Options) {
+ c.maxTagValueLength = maxTagValueLength
+ }
+}
+
+// NoDebugFlagOnForcedSampling can be used to decide whether the debug flag will be set
+// when calling span.setSamplingPriority to force sample a span.
+func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
+ return func(c *Options) {
+ c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+ }
+}
+
+// Tag creates an option that adds a tracer-level tag.
+func Tag(key string, value interface{}) Option {
+ return func(c *Options) {
+ c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
+ }
+}
+
+// Injector registers an Injector with the given format.
+func Injector(format interface{}, injector jaeger.Injector) Option {
+ return func(c *Options) {
+ c.injectors[format] = injector
+ }
+}
+
+// Extractor registers an Extractor with the given format.
+func Extractor(format interface{}, extractor jaeger.Extractor) Option {
+ return func(c *Options) {
+ c.extractors[format] = extractor
+ }
+}
+
+// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs.
+func WithRandomNumber(f func() uint64) Option {
+ return func(c *Options) {
+ c.randomNumber = f
+ }
+}
+
+func applyOptions(options ...Option) Options {
+ opts := Options{
+ injectors: make(map[interface{}]jaeger.Injector),
+ extractors: make(map[interface{}]jaeger.Extractor),
+ }
+ for _, option := range options {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = metrics.NullFactory
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
new file mode 100644
index 0000000..d8eb698
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+
+ "github.com/opentracing/opentracing-go"
+)
+
+const (
+ // JaegerClientVersion is the version of the client library reported as Span tag.
+ JaegerClientVersion = "Go-2.29.1"
+
+ // JaegerClientVersionTagKey is the name of the tag used to report client version.
+ JaegerClientVersionTagKey = "jaeger.version"
+
+ // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader = "jaeger-debug-id"
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader = "jaeger-baggage"
+
+ // TracerHostnameTagKey used to report host name of the process.
+ TracerHostnameTagKey = "hostname"
+
+ // TracerIPTagKey used to report ip of the process.
+ TracerIPTagKey = "ip"
+
+ // TracerUUIDTagKey used to report UUID of the client process.
+ TracerUUIDTagKey = "client-uuid"
+
+ // SamplerTypeTagKey reports which sampler was used on the root span.
+ SamplerTypeTagKey = "sampler.type"
+
+ // SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
+ SamplerParamTagKey = "sampler.param"
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName = "uber-trace-id"
+
+ // TracerStateHeaderName is deprecated.
+ // Deprecated: use TraceContextHeaderName
+ TracerStateHeaderName = TraceContextHeaderName
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix = "uberctx-"
+
+ // SamplerTypeConst is the type of sampler that always makes the same decision.
+ SamplerTypeConst = "const"
+
+ // SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+ SamplerTypeRemote = "remote"
+
+ // SamplerTypeProbabilistic is the type of sampler that samples traces
+ // with a certain fixed probability.
+ SamplerTypeProbabilistic = "probabilistic"
+
+ // SamplerTypeRateLimiting is the type of sampler that samples
+ // only up to a fixed number of traces per second.
+ SamplerTypeRateLimiting = "ratelimiting"
+
+ // SamplerTypeLowerBound is the type of sampler that samples
+ // at least a fixed number of traces per second.
+ SamplerTypeLowerBound = "lowerbound"
+
+ // DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+ DefaultUDPSpanServerHost = "localhost"
+
+ // DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+ DefaultUDPSpanServerPort = 6831
+
+ // DefaultSamplingServerPort is the default port to fetch sampling config from, via http
+ DefaultSamplingServerPort = 5778
+
+ // DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+ DefaultMaxTagValueLength = 256
+
+ // SelfRefType is a jaeger specific reference type that supports creating a span
+ // with an already defined context.
+ selfRefType opentracing.SpanReferenceType = 99
+)
+
+var (
+ // DefaultSamplingServerURL is the default url to fetch sampling config from, via http
+ DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
+)
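
For reference, the value propagated under TraceContextHeaderName follows
Jaeger's documented "{trace-id}:{span-id}:{parent-span-id}:{flags}" layout,
with all fields hex-encoded. A toy encoder under that assumption; real trace
IDs are 128-bit (High/Low halves), simplified to 64 bits here:

package main

import "fmt"

// traceContextValue renders a Jaeger-style propagation value (illustrative).
func traceContextValue(traceID, spanID, parentID uint64, flags byte) string {
	return fmt.Sprintf("%x:%x:%x:%x", traceID, spanID, parentID, flags)
}

func main() {
	fmt.Println(traceContextValue(0xabc123, 0x1, 0x0, 0x1)) // abc123:1:0:1
}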
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
new file mode 100644
index 0000000..4ce1881
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+ // Create and return a span observer. Called when a span starts.
+ // If the Observer is not interested in the given span, it must return (nil, false).
+	// E.g.:
+	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//         var sp opentracing.Span
+	//         sso := opentracing.StartSpanOptions{}
+	//         if spanObserver, ok := Observer.OnStartSpan(sp, opName, sso); ok {
+	//             // we have a valid SpanObserver
+	//         }
+	//         ...
+	//     }
+ OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+ obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+ spanObserver := o.obs.OnStartSpan(operationName, options)
+ return spanObserver, spanObserver != nil
+}
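
Implementing the pair of interfaces means satisfying OnStartSpan plus the three
span-event methods; returning (nil, false) opts the observer out of a span. A
sketch with illustrative names (opCountObserver, opCountSpanObserver), assuming
registration through the tracer's TracerOptions.ContribObserver option defined
elsewhere in this package:

package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// opCountObserver counts how many spans start per operation name.
type opCountObserver struct{ counts map[string]int }

type opCountSpanObserver struct{}

func (o *opCountObserver) OnStartSpan(
	sp opentracing.Span,
	operationName string,
	options opentracing.StartSpanOptions,
) (jaeger.ContribSpanObserver, bool) {
	if operationName == "" {
		return nil, false // not interested in unnamed spans
	}
	o.counts[operationName]++
	return opCountSpanObserver{}, true
}

func (opCountSpanObserver) OnSetOperationName(operationName string)    {}
func (opCountSpanObserver) OnSetTag(key string, value interface{})     {}
func (opCountSpanObserver) OnFinish(options opentracing.FinishOptions) {}

func main() {
	obs := &opCountObserver{counts: map[string]int{}}
	tracer, closer := jaeger.NewTracer(
		"demo-service",
		jaeger.NewConstSampler(true),
		jaeger.NewNullReporter(),
		jaeger.TracerOptions.ContribObserver(obs),
	)
	defer closer.Close()
	tracer.StartSpan("fetch-user").Finish()
	fmt.Println(obs.counts["fetch-user"]) // 1
}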
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 0000000..4f55490
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
+
+For integration instructions please refer to the README:
+
+https://github.com/uber/jaeger-client-go/blob/master/README.md
+*/
+package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
new file mode 100644
index 0000000..c1ec339
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.lock
@@ -0,0 +1,105 @@
+hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed
+updated: 2020-07-31T13:30:37.242608-04:00
+imports:
+- name: github.com/beorn7/perks
+ version: 3a771d992973f24aa725d07868b467d1ddfceafb
+ subpackages:
+ - quantile
+- name: github.com/HdrHistogram/hdrhistogram-go
+ version: 3a0bb77429bd3a61596f5e8a3172445844342120
+- name: github.com/crossdock/crossdock-go
+ version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
+ subpackages:
+ - assert
+ - require
+- name: github.com/davecgh/go-spew
+ version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
+ subpackages:
+ - spew
+- name: github.com/golang/mock
+ version: 51421b967af1f557f93a59e0057aaf15ca02e29c
+ subpackages:
+ - gomock
+- name: github.com/golang/protobuf
+ version: b5d812f8a3706043e23a9cd5babf2e5423744d30
+ subpackages:
+ - proto
+- name: github.com/matttproud/golang_protobuf_extensions
+ version: c182affec369e30f25d3eb8cd8a478dee585ae7d
+ subpackages:
+ - pbutil
+- name: github.com/opentracing/opentracing-go
+ version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12
+ subpackages:
+ - ext
+ - harness
+ - log
+- name: github.com/pkg/errors
+ version: ba968bfe8b2f7e042a574c888954fccecfa385b4
+- name: github.com/pmezard/go-difflib
+ version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
+ subpackages:
+ - difflib
+- name: github.com/prometheus/client_golang
+ version: 170205fb58decfd011f1550d4cfb737230d7ae4f
+ subpackages:
+ - prometheus
+ - prometheus/internal
+- name: github.com/prometheus/client_model
+ version: fd36f4220a901265f90734c3183c5f0c91daa0b8
+ subpackages:
+ - go
+- name: github.com/prometheus/common
+ version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119
+ subpackages:
+ - expfmt
+ - internal/bitbucket.org/ww/goautoneg
+ - model
+- name: github.com/prometheus/procfs
+ version: 8a055596020d692cf491851e47ba3e302d9f90ce
+ subpackages:
+ - internal/fs
+ - internal/util
+- name: github.com/stretchr/testify
+ version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8
+ subpackages:
+ - assert
+ - mock
+ - require
+ - suite
+- name: github.com/uber-go/atomic
+ version: 845920076a298bdb984fb0f1b86052e4ca0a281c
+- name: github.com/uber/jaeger-lib
+ version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4
+ subpackages:
+ - metrics
+ - metrics/metricstest
+ - metrics/prometheus
+- name: go.uber.org/atomic
+ version: 845920076a298bdb984fb0f1b86052e4ca0a281c
+- name: go.uber.org/multierr
+ version: b587143a48b62b01d337824eab43700af6ffe222
+- name: go.uber.org/zap
+ version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd
+ subpackages:
+ - buffer
+ - internal/bufferpool
+ - internal/color
+ - internal/exit
+ - zapcore
+ - zaptest/observer
+- name: golang.org/x/net
+ version: addf6b3196f61cd44ce5a76657913698c73479d0
+ subpackages:
+ - context
+ - context/ctxhttp
+- name: golang.org/x/sys
+ version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64
+ subpackages:
+ - internal/unsafeheader
+ - windows
+- name: gopkg.in/yaml.v3
+ version: eeeca48fe7764f320e4870d231902bf9c1be2c08
+testImports:
+- name: github.com/stretchr/objx
+ version: 35313a95ee26395aa17d366c71a2ccf788fa69b6
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
new file mode 100644
index 0000000..295678c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml
@@ -0,0 +1,30 @@
+package: github.com/uber/jaeger-client-go
+import:
+- package: github.com/opentracing/opentracing-go
+ version: ^1.2
+ subpackages:
+ - ext
+ - log
+- package: github.com/crossdock/crossdock-go
+- package: github.com/uber/jaeger-lib
+ version: ^2.3.0
+ subpackages:
+ - metrics
+- package: github.com/pkg/errors
+ version: ~0.8.0
+- package: go.uber.org/zap
+ source: https://github.com/uber-go/zap.git
+ version: ^1
+- package: github.com/uber-go/atomic
+ version: ^1
+- package: github.com/prometheus/client_golang
+ version: 1.1
+- package: github.com/prometheus/procfs
+ version: 0.0.6
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+ - suite
+- package: github.com/golang/mock
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
new file mode 100644
index 0000000..5da7035
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// HeadersConfig contains the values for the header keys that Jaeger will use.
+// These values may be either custom or default depending on whether custom
+// values were provided via a configuration.
+type HeadersConfig struct {
+ // JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+ // if found in the carrier, forces the trace to be sampled as "debug" trace.
+ // The value of the header is recorded as the tag on the root span, so that the
+ // trace can be found in the UI using this value as a correlation ID.
+ JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
+
+ // JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+ // It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+ // a root span does not exist.
+ JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
+
+ // TraceContextHeaderName is the http header name used to propagate tracing context.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
+
+ // TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+ // This must be in lower-case to avoid mismatches when decoding incoming headers.
+ TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
+}
+
+// ApplyDefaults sets missing configuration keys to default values
+func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
+ if c.JaegerBaggageHeader == "" {
+ c.JaegerBaggageHeader = JaegerBaggageHeader
+ }
+ if c.JaegerDebugHeader == "" {
+ c.JaegerDebugHeader = JaegerDebugHeader
+ }
+ if c.TraceBaggageHeaderPrefix == "" {
+ c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
+ }
+ if c.TraceContextHeaderName == "" {
+ c.TraceContextHeaderName = TraceContextHeaderName
+ }
+ return c
+}
+
+func getDefaultHeadersConfig() *HeadersConfig {
+ return &HeadersConfig{
+ JaegerDebugHeader: JaegerDebugHeader,
+ JaegerBaggageHeader: JaegerBaggageHeader,
+ TraceContextHeaderName: TraceContextHeaderName,
+ TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
+ }
+}
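
ApplyDefaults only fills keys that are still empty, so a caller can override a
single header and inherit the rest. A small runnable sketch against the
exported API above; the custom header name is illustrative:

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	hc := (&jaeger.HeadersConfig{
		TraceContextHeaderName: "x-custom-trace-id", // custom; keep lower-case
	}).ApplyDefaults()
	fmt.Println(hc.TraceContextHeaderName)   // x-custom-trace-id
	fmt.Println(hc.JaegerBaggageHeader)      // jaeger-baggage
	fmt.Println(hc.TraceBaggageHeaderPrefix) // uberctx-
}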
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
new file mode 100644
index 0000000..7457293
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultMaxValueLength = 2048
+ defaultRefreshInterval = time.Minute
+ defaultHostPort = "localhost:5778"
+)
+
+// Option is a function that sets some option on the RestrictionManager
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ denyBaggageOnInitializationFailure bool
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+}
+
+// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
+// If true, RestrictionManager will not allow any baggage to be written until baggage
+// restrictions have been retrieved from the agent.
+// If false, RestrictionManager will allow all baggage to be written until baggage
+// restrictions have been retrieved from the agent.
+func (options) DenyBaggageOnInitializationFailure(b bool) Option {
+ return func(o *options) {
+ o.denyBaggageOnInitializationFailure = b
+ }
+}
+
+// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the RestrictionManager.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
+// the baggage restrictions.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
new file mode 100644
index 0000000..2f58bb5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "context"
+ "fmt"
+ "net/url"
+ "sync"
+ "time"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+type httpBaggageRestrictionManagerProxy struct {
+ url string
+}
+
+func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ return &httpBaggageRestrictionManagerProxy{
+ url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
+ }
+}
+
+func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) {
+ var out []*thrift.BaggageRestriction
+ if err := utils.GetJSON(s.url, &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
+type RestrictionManager struct {
+ options
+
+ mux sync.RWMutex
+ serviceName string
+ restrictions map[string]*baggage.Restriction
+ thriftProxy thrift.BaggageRestrictionManager
+ pollStopped sync.WaitGroup
+ stopPoll chan struct{}
+ invalidRestriction *baggage.Restriction
+ validRestriction *baggage.Restriction
+
+ // Determines if the manager has successfully retrieved baggage restrictions from agent
+ initialized bool
+}
+
+// NewRestrictionManager returns a RestrictionManager that polls the agent for the latest
+// baggage restrictions.
+func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
+	// TODO there is a developing use case where a single tracer can generate traces on behalf of many services;
+	// the restrictions map will need to exist per service.
+ opts := applyOptions(options...)
+ m := &RestrictionManager{
+ serviceName: serviceName,
+ options: opts,
+ restrictions: make(map[string]*baggage.Restriction),
+ thriftProxy: newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
+ stopPoll: make(chan struct{}),
+ invalidRestriction: baggage.NewRestriction(false, 0),
+ validRestriction: baggage.NewRestriction(true, defaultMaxValueLength),
+ }
+ m.pollStopped.Add(1)
+ go m.pollManager()
+ return m
+}
+
+// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
+func (m *RestrictionManager) isReady() bool {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ return m.initialized
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
+ m.mux.RLock()
+ defer m.mux.RUnlock()
+ if !m.initialized {
+ if m.denyBaggageOnInitializationFailure {
+ return m.invalidRestriction
+ }
+ return m.validRestriction
+ }
+ if restriction, ok := m.restrictions[key]; ok {
+ return restriction
+ }
+ return m.invalidRestriction
+}
+
+// Close stops remote polling and closes the RestrictionManager.
+func (m *RestrictionManager) Close() error {
+ close(m.stopPoll)
+ m.pollStopped.Wait()
+ return nil
+}
+
+func (m *RestrictionManager) pollManager() {
+ defer m.pollStopped.Done()
+ // attempt to initialize baggage restrictions
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
+ }
+ ticker := time.NewTicker(m.refreshInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ if err := m.updateRestrictions(); err != nil {
+ m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
+ }
+ case <-m.stopPoll:
+ return
+ }
+ }
+}
+
+func (m *RestrictionManager) updateRestrictions() error {
+ restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName)
+ if err != nil {
+ m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
+ return err
+ }
+ newRestrictions := m.parseRestrictions(restrictions)
+ m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
+ m.mux.Lock()
+ defer m.mux.Unlock()
+ m.initialized = true
+ m.restrictions = newRestrictions
+ return nil
+}
+
+func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
+ setters := make(map[string]*baggage.Restriction, len(restrictions))
+ for _, restriction := range restrictions {
+ setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
+ }
+ return setters
+}
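
The background loop in NewRestrictionManager/pollManager (a WaitGroup for
shutdown, one synchronous initial attempt, then a ticker raced against a stop
channel) is a reusable shape. A distilled, self-contained sketch of the same
poll/stop structure; poller and newPoller are illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

type poller struct {
	stop    chan struct{}
	stopped sync.WaitGroup
}

func newPoller(interval time.Duration, refresh func()) *poller {
	p := &poller{stop: make(chan struct{})}
	p.stopped.Add(1)
	go func() {
		defer p.stopped.Done()
		refresh() // initial attempt, mirroring pollManager
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-t.C:
				refresh()
			case <-p.stop:
				return
			}
		}
	}()
	return p
}

// Close mirrors RestrictionManager.Close: signal, then wait for the goroutine.
func (p *poller) Close() {
	close(p.stop)
	p.stopped.Wait()
}

func main() {
	p := newPoller(10*time.Millisecond, func() { fmt.Println("refresh") })
	time.Sleep(35 * time.Millisecond)
	p.Close()
}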
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
new file mode 100644
index 0000000..c16a5c5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage
+
+const (
+ defaultMaxValueLength = 2048
+)
+
+// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
+type Restriction struct {
+ keyAllowed bool
+ maxValueLength int
+}
+
+// NewRestriction returns a new Restriction.
+func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
+ return &Restriction{
+ keyAllowed: keyAllowed,
+ maxValueLength: maxValueLength,
+ }
+}
+
+// KeyAllowed returns whether the baggage key for this restriction is allowed.
+func (r *Restriction) KeyAllowed() bool {
+ return r.keyAllowed
+}
+
+// MaxValueLength returns the max length for the baggage value.
+func (r *Restriction) MaxValueLength() int {
+ return r.maxValueLength
+}
+
+// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
+// will return a Restriction for a specific baggage key which will determine whether the baggage
+// key is allowed for the current service and any other applicable restrictions on the baggage
+// value.
+type RestrictionManager interface {
+ GetRestriction(service, key string) *Restriction
+}
+
+// DefaultRestrictionManager allows any baggage key.
+type DefaultRestrictionManager struct {
+ defaultRestriction *Restriction
+}
+
+// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
+func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
+ if maxValueLength == 0 {
+ maxValueLength = defaultMaxValueLength
+ }
+ return &DefaultRestrictionManager{
+ defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
+ }
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
+ return m.defaultRestriction
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
new file mode 100644
index 0000000..fe0bef2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporterstats
+
+// ReporterStats exposes some metrics from the RemoteReporter.
+type ReporterStats interface {
+ SpansDroppedFromQueue() int64
+}
+
+// Receiver can be implemented by a Transport to be given ReporterStats.
+type Receiver interface {
+ SetReporterStats(ReporterStats)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
new file mode 100644
index 0000000..0e10b8a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanlog
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+)
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+// TODO refactor into pluggable materializer
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+ fields := fieldsAsMap(make(map[string]string, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(fields)
+ }
+ if event, ok := fields["event"]; ok && len(fields) == 1 {
+ return []byte(event), nil
+ }
+ return json.Marshal(fields)
+}
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+ ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+ ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+ ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+ ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+ ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+ value(ml)
+}
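
fieldsAsMap works because it satisfies the opentracing log.Encoder interface:
Field.Marshal dispatches each field to the matching Emit* method. A runnable
illustration of the same visitor mechanism, with a toy encoder that prints
fields instead of collecting them into a map:

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

type printer struct{}

func (printer) EmitString(key, value string)             { fmt.Println(key, "=", value) }
func (printer) EmitBool(key string, value bool)          { fmt.Println(key, "=", value) }
func (printer) EmitInt(key string, value int)            { fmt.Println(key, "=", value) }
func (printer) EmitInt32(key string, value int32)        { fmt.Println(key, "=", value) }
func (printer) EmitInt64(key string, value int64)        { fmt.Println(key, "=", value) }
func (printer) EmitUint32(key string, value uint32)      { fmt.Println(key, "=", value) }
func (printer) EmitUint64(key string, value uint64)      { fmt.Println(key, "=", value) }
func (printer) EmitFloat32(key string, value float32)    { fmt.Println(key, "=", value) }
func (printer) EmitFloat64(key string, value float64)    { fmt.Println(key, "=", value) }
func (printer) EmitObject(key string, value interface{}) { fmt.Printf("%s = %+v\n", key, value) }
func (printer) EmitLazyLogger(value log.LazyLogger)      { value(printer{}) }

func main() {
	for _, f := range []log.Field{log.String("event", "retry"), log.Int("attempt", 3)} {
		f.Marshal(printer{}) // dispatches to EmitString / EmitInt above
	}
}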
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
new file mode 100644
index 0000000..f52c322
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go"
+)
+
+const (
+ defaultHostPort = "localhost:5778"
+ defaultRefreshInterval = time.Second * 5
+)
+
+// Option is a function that sets some option on the Throttler
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+ metrics *jaeger.Metrics
+ logger jaeger.Logger
+ hostPort string
+ refreshInterval time.Duration
+ synchronousInitialization bool
+}
+
+// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+ return func(o *options) {
+ o.metrics = m
+ }
+}
+
+// Logger creates an Option that sets the logger used by the Throttler.
+func (options) Logger(logger jaeger.Logger) Option {
+ return func(o *options) {
+ o.logger = logger
+ }
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
+func (options) HostPort(hostPort string) Option {
+ return func(o *options) {
+ o.hostPort = hostPort
+ }
+}
+
+// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
+// credits.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+ return func(o *options) {
+ o.refreshInterval = refreshInterval
+ }
+}
+
+// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
+// fetch credits from the agent when an operation is seen for the first time. This should be set to true
+// if the client will be used by a short-lived service that needs to ensure that credits are fetched upfront
+// so that sampling or throttling can take effect immediately.
+func (options) SynchronousInitialization(b bool) Option {
+ return func(o *options) {
+ o.synchronousInitialization = b
+ }
+}
+
+func applyOptions(o ...Option) options {
+ opts := options{}
+ for _, option := range o {
+ option(&opts)
+ }
+ if opts.metrics == nil {
+ opts.metrics = jaeger.NewNullMetrics()
+ }
+ if opts.logger == nil {
+ opts.logger = jaeger.NullLogger
+ }
+ if opts.hostPort == "" {
+ opts.hostPort = defaultHostPort
+ }
+ if opts.refreshInterval == 0 {
+ opts.refreshInterval = defaultRefreshInterval
+ }
+ return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
new file mode 100644
index 0000000..20f434f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+ "fmt"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/uber/jaeger-client-go"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	// minimumCredits is the minimum amount of credits necessary to not be throttled,
+	// i.e. if currentCredits >= minimumCredits, then the operation will not be throttled.
+ minimumCredits = 1.0
+)
+
+var (
+ errorUUIDNotSet = errors.New("Throttler UUID must be set")
+)
+
+type operationBalance struct {
+ Operation string `json:"operation"`
+ Balance float64 `json:"balance"`
+}
+
+type creditResponse struct {
+ Balances []operationBalance `json:"balances"`
+}
+
+type httpCreditManagerProxy struct {
+ hostPort string
+}
+
+func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
+ return &httpCreditManagerProxy{
+ hostPort: hostPort,
+ }
+}
+
+// N.B. Operations list must not be empty.
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
+ params := url.Values{}
+ params.Set("service", serviceName)
+ params.Set("uuid", uuid)
+ for _, op := range operations {
+ params.Add("operations", op)
+ }
+ var resp creditResponse
+ if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
+ return nil, errors.Wrap(err, "Failed to receive credits from agent")
+ }
+ return &resp, nil
+}
+
+// Throttler retrieves credits from the agent and uses them to throttle operations.
+type Throttler struct {
+ options
+
+ mux sync.RWMutex
+ service string
+ uuid atomic.Value
+ creditManager *httpCreditManagerProxy
+ credits map[string]float64 // map of operation->credits
+ close chan struct{}
+ stopped sync.WaitGroup
+}
+
+// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
+// the service.
+func NewThrottler(service string, options ...Option) *Throttler {
+ opts := applyOptions(options...)
+ creditManager := newHTTPCreditManagerProxy(opts.hostPort)
+ t := &Throttler{
+ options: opts,
+ creditManager: creditManager,
+ service: service,
+ credits: make(map[string]float64),
+ close: make(chan struct{}),
+ }
+ t.stopped.Add(1)
+ go t.pollManager()
+ return t
+}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t *Throttler) IsAllowed(operation string) bool {
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ value, ok := t.credits[operation]
+ if !ok || value == 0 {
+ if !ok {
+ // NOTE: This appears to be a no-op at first glance, but it stores
+ // the operation key in the map. Necessary for functionality of
+ // Throttler#operations method.
+ t.credits[operation] = 0
+ }
+ if !t.synchronousInitialization {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ // If it is the first time this operation is being checked, synchronously fetch
+ // the credits.
+ credits, err := t.fetchCredits([]string{operation})
+ if err != nil {
+ // Failed to receive credits from agent, try again next time
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return false
+ }
+ if len(credits.Balances) == 0 {
+ // This shouldn't happen but just in case
+ return false
+ }
+ for _, opBalance := range credits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+ }
+ return t.isAllowed(operation)
+}
+
+// Close stops the throttler from fetching credits from remote.
+func (t *Throttler) Close() error {
+ close(t.close)
+ t.stopped.Wait()
+ return nil
+}
+
+// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
+// requests are made.
+func (t *Throttler) SetProcess(process jaeger.Process) {
+ if process.UUID != "" {
+ t.uuid.Store(process.UUID)
+ }
+}
+
+// N.B. This function must be called with the Write Lock
+func (t *Throttler) isAllowed(operation string) bool {
+ credits := t.credits[operation]
+ if credits < minimumCredits {
+ t.metrics.ThrottledDebugSpans.Inc(1)
+ return false
+ }
+ t.credits[operation] = credits - minimumCredits
+ return true
+}
+
+func (t *Throttler) pollManager() {
+ defer t.stopped.Done()
+ ticker := time.NewTicker(t.refreshInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ticker.C:
+ t.refreshCredits()
+ case <-t.close:
+ return
+ }
+ }
+}
+
+func (t *Throttler) operations() []string {
+ t.mux.RLock()
+ defer t.mux.RUnlock()
+ operations := make([]string, 0, len(t.credits))
+ for op := range t.credits {
+ operations = append(operations, op)
+ }
+ return operations
+}
+
+func (t *Throttler) refreshCredits() {
+ operations := t.operations()
+ if len(operations) == 0 {
+ return
+ }
+ newCredits, err := t.fetchCredits(operations)
+ if err != nil {
+ t.metrics.ThrottlerUpdateFailure.Inc(1)
+ t.logger.Error("Failed to fetch credits: " + err.Error())
+ return
+ }
+ t.metrics.ThrottlerUpdateSuccess.Inc(1)
+
+ t.mux.Lock()
+ defer t.mux.Unlock()
+ for _, opBalance := range newCredits.Balances {
+ t.credits[opBalance.Operation] += opBalance.Balance
+ }
+}
+
+func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
+ uuid := t.uuid.Load()
+ uuidStr, _ := uuid.(string)
+ if uuid == nil || uuidStr == "" {
+ return nil, errorUUIDNotSet
+ }
+ return t.creditManager.FetchCredits(uuidStr, t.service, operations)
+}
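
The accounting in isAllowed reduces to a per-operation balance: each refresh
deposits fetched credits, and each allowed call debits minimumCredits. A
distilled sketch of just that bookkeeping; creditBank is an illustrative name:

package main

import "fmt"

const minimumCredits = 1.0

type creditBank struct{ credits map[string]float64 }

// deposit mirrors refreshCredits adding fetched balances to the map.
func (b *creditBank) deposit(op string, amount float64) { b.credits[op] += amount }

// isAllowed mirrors Throttler.isAllowed: allow only while the balance covers one unit.
func (b *creditBank) isAllowed(op string) bool {
	if b.credits[op] < minimumCredits {
		return false // throttled until the next refresh deposits more credit
	}
	b.credits[op] -= minimumCredits
	return true
}

func main() {
	b := &creditBank{credits: map[string]float64{}}
	b.deposit("op", 2)
	fmt.Println(b.isAllowed("op"), b.isAllowed("op"), b.isAllowed("op")) // true true false
}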
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 0000000..196ed69
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate-limit operations. For example, given that debug spans
+// are always sampled, a throttler can be enabled per client to rate-limit the number
+// of debug spans a client can start.
+type Throttler interface {
+ // IsAllowed determines whether the operation should be allowed and not be
+ // throttled.
+ IsAllowed(operation string) bool
+}
+
+// DefaultThrottler doesn't throttle at all.
+type DefaultThrottler struct{}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t DefaultThrottler) IsAllowed(operation string) bool {
+ return true
+}
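
Any type with an IsAllowed(operation string) bool method satisfies this
interface. A toy throttler that caps each operation at a fixed count per
process lifetime, purely for illustration:

package main

import "fmt"

type maxPerRun struct {
	limit int
	seen  map[string]int
}

func (t *maxPerRun) IsAllowed(operation string) bool {
	t.seen[operation]++
	return t.seen[operation] <= t.limit
}

func main() {
	t := &maxPerRun{limit: 2, seen: map[string]int{}}
	for i := 0; i < 3; i++ {
		fmt.Println(t.IsAllowed("op")) // true, true, false
	}
}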
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
new file mode 100644
index 0000000..8402d08
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/interop.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// TODO this file should not be needed after TChannel PR.
+
+type formatKey int
+
+// SpanContextFormat is a constant used as OpenTracing Format.
+// Requires *SpanContext as carrier.
+// This format is intended for interop with TChannel or other Zipkin-like tracers.
+const SpanContextFormat formatKey = iota
+
+type jaegerTraceContextPropagator struct {
+ tracer *Tracer
+}
+
+func (p *jaegerTraceContextPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.CopyFrom(&ctx)
+ return nil
+}
+
+func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(*SpanContext)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ ctx := new(SpanContext)
+ ctx.CopyFrom(carrier)
+ return *ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
new file mode 100644
index 0000000..868b2a5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+
+ "github.com/opentracing/opentracing-go/log"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+type tags []*j.Tag
+
+// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
+ fields := tags(make([]*j.Tag, 0, len(logFields)))
+ for _, field := range logFields {
+ field.Marshal(&fields)
+ }
+ return fields
+}
+
+func (t *tags) EmitString(key, value string) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
+}
+
+func (t *tags) EmitBool(key string, value bool) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
+}
+
+func (t *tags) EmitInt(key string, value int) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt32(key string, value int32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt64(key string, value int64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
+}
+
+func (t *tags) EmitUint32(key string, value uint32) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitUint64(key string, value uint64) {
+ vLong := int64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitFloat32(key string, value float32) {
+ vDouble := float64(value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
+}
+
+func (t *tags) EmitFloat64(key string, value float64) {
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
+}
+
+func (t *tags) EmitObject(key string, value interface{}) {
+ vStr := fmt.Sprintf("%+v", value)
+ *t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
+}
+
+func (t *tags) EmitLazyLogger(value log.LazyLogger) {
+ value(t)
+}
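
ConvertLogsToJaegerTags is exported, so it can be exercised directly; the same
Encoder dispatch used in spanlog/json.go routes each field to an Emit* method
here, producing thrift Tag values. A quick runnable use:

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tags := jaeger.ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "cache-miss"),
		log.Int64("latency_us", 1250),
	})
	for _, t := range tags {
		fmt.Println(t.Key, t.VType) // event STRING, latency_us LONG
	}
}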
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
new file mode 100644
index 0000000..3ac2f8f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// BuildJaegerThrift builds jaeger span based on internal span.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerThrift(span *Span) *j.Span {
+ span.Lock()
+ defer span.Unlock()
+ startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ jaegerSpan := &j.Span{
+ TraceIdLow: int64(span.context.traceID.Low),
+ TraceIdHigh: int64(span.context.traceID.High),
+ SpanId: int64(span.context.spanID),
+ ParentSpanId: int64(span.context.parentID),
+ OperationName: span.operationName,
+ Flags: int32(span.context.samplingState.flags()),
+ StartTime: startTime,
+ Duration: duration,
+ Tags: buildTags(span.tags, span.tracer.options.maxTagValueLength),
+ Logs: buildLogs(span.logs),
+ References: buildReferences(span.references),
+ }
+ return jaegerSpan
+}
+
+// BuildJaegerProcessThrift creates a thrift Process type.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerProcessThrift(span *Span) *j.Process {
+ span.Lock()
+ defer span.Unlock()
+ return buildJaegerProcessThrift(span.tracer)
+}
+
+func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
+ process := &j.Process{
+ ServiceName: tracer.serviceName,
+ Tags: buildTags(tracer.tags, tracer.options.maxTagValueLength),
+ }
+ if tracer.process.UUID != "" {
+ process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
+ }
+ return process
+}
+
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
+ jTags := make([]*j.Tag, 0, len(tags))
+ for _, tag := range tags {
+ jTag := buildTag(&tag, maxTagValueLength)
+ jTags = append(jTags, jTag)
+ }
+ return jTags
+}
+
+func buildLogs(logs []opentracing.LogRecord) []*j.Log {
+ jLogs := make([]*j.Log, 0, len(logs))
+ for _, log := range logs {
+ jLog := &j.Log{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Fields: ConvertLogsToJaegerTags(log.Fields),
+ }
+ jLogs = append(jLogs, jLog)
+ }
+ return jLogs
+}
+
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
+ jTag := &j.Tag{Key: tag.key}
+ switch value := tag.value.(type) {
+ case string:
+ vStr := truncateString(value, maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ case []byte:
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ jTag.VBinary = value
+ jTag.VType = j.TagType_BINARY
+ case int:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint8:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint16:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case uint32:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case int64:
+	case int64:
+		vLong := value
+ jTag.VType = j.TagType_LONG
+ case uint64:
+ vLong := int64(value)
+ jTag.VLong = &vLong
+ jTag.VType = j.TagType_LONG
+ case float32:
+ vDouble := float64(value)
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+	case float64:
+		vDouble := value
+ jTag.VDouble = &vDouble
+ jTag.VType = j.TagType_DOUBLE
+ case bool:
+ vBool := value
+ jTag.VBool = &vBool
+ jTag.VType = j.TagType_BOOL
+ default:
+ vStr := truncateString(stringify(value), maxTagValueLength)
+ jTag.VStr = &vStr
+ jTag.VType = j.TagType_STRING
+ }
+ return jTag
+}
+
+func buildReferences(references []Reference) []*j.SpanRef {
+ retMe := make([]*j.SpanRef, 0, len(references))
+ for _, ref := range references {
+ if ref.Type == opentracing.ChildOfRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
+ } else if ref.Type == opentracing.FollowsFromRef {
+ retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
+ }
+ }
+ return retMe
+}
+
+func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
+ return &j.SpanRef{
+ RefType: refType,
+ TraceIdLow: int64(ctx.traceID.Low),
+ TraceIdHigh: int64(ctx.traceID.High),
+ SpanId: int64(ctx.spanID),
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
new file mode 100644
index 0000000..ced6e0c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "sync"
+)
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package.
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// Debugf logs a message at debug priority
+func (l *stdLogger) Debugf(msg string, args ...interface{}) {
+ log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface.
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
+func (l *nullLogger) Debugf(msg string, args ...interface{}) {}
+
+// BytesBufferLogger implements Logger backed by a bytes.Buffer.
+type BytesBufferLogger struct {
+ mux sync.Mutex
+ buf bytes.Buffer
+}
+
+// Error implements Logger.
+func (l *BytesBufferLogger) Error(msg string) {
+ l.mux.Lock()
+ l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
+ l.mux.Unlock()
+}
+
+// Infof implements Logger.
+func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
+ l.mux.Lock()
+ l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
+ l.mux.Unlock()
+}
+
+// Debugf implements Logger.
+func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
+ l.mux.Lock()
+ l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
+ l.mux.Unlock()
+}
+
+// String returns string representation of the underlying buffer.
+func (l *BytesBufferLogger) String() string {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ return l.buf.String()
+}
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() {
+ l.mux.Lock()
+ defer l.mux.Unlock()
+ l.buf.Reset()
+}
+
+// DebugLogger is an interface which adds a debug logging level
+type DebugLogger interface {
+ Logger
+
+ // Debugf logs a message at debug priority
+ Debugf(msg string, args ...interface{})
+}
+
+// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger.
+// If the provided Logger doesn't satisfy the DebugLogger interface, a logger
+// with debug logging disabled is returned.
+func DebugLogAdapter(logger Logger) DebugLogger {
+ if logger == nil {
+ return nil
+ }
+ if debugLogger, ok := logger.(DebugLogger); ok {
+ return debugLogger
+ }
+ logger.Infof("debug logging disabled")
+ return debugDisabledLogAdapter{logger: logger}
+}
+
+type debugDisabledLogAdapter struct {
+ logger Logger
+}
+
+func (d debugDisabledLogAdapter) Error(msg string) {
+ d.logger.Error(msg)
+}
+
+func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) {
+ d.logger.Infof(msg, args...)
+}
+
+// Debugf is a nop
+func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) {
+}
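
Adapting a third-party logger only requires the two Logger methods;
DebugLogAdapter then upgrades it to a DebugLogger, dropping Debugf output (and
logging a one-time notice) when the wrapped logger has no debug level. A sketch
wrapping the standard library; prefixLogger is illustrative:

package main

import (
	stdlog "log"

	jlog "github.com/uber/jaeger-client-go/log"
)

type prefixLogger struct{ prefix string }

func (l prefixLogger) Error(msg string) { stdlog.Printf("%s ERROR: %s", l.prefix, msg) }

func (l prefixLogger) Infof(msg string, args ...interface{}) {
	stdlog.Printf(l.prefix+" "+msg, args...)
}

func main() {
	logger := jlog.DebugLogAdapter(prefixLogger{prefix: "[jaeger]"})
	logger.Infof("reporter started with %d workers", 4) // logged
	logger.Debugf("noisy detail")                       // dropped: debug disabled
}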
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
new file mode 100644
index 0000000..d4f0b50
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/logger.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "log"
+
+// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+ // Error logs a message at error priority
+ Error(msg string)
+
+ // Infof logs a message at info priority
+ Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the standard `log` package.
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+ log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+ log.Printf(msg, args...)
+}
+
+// NullLogger is a no-op implementation of the Logger interface.
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string) {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
new file mode 100644
index 0000000..50e4e22
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+// Metrics is a container of all stats emitted by Jaeger tracer.
+type Metrics struct {
+ // Number of traces started by this tracer as sampled
+ TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
+
+ // Number of traces started by this tracer as not sampled
+ TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+
+ // Number of traces started by this tracer with delayed sampling
+ TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=delayed" help:"Number of traces started by this tracer with delayed sampling"`
+
+ // Number of externally started sampled traces this tracer joined
+ TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
+
+ // Number of externally started not-sampled traces this tracer joined
+ TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
+
+ // Number of sampled spans started by this tracer
+ SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
+
+ // Number of not sampled spans started by this tracer
+ SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
+
+ // Number of spans with delayed sampling started by this tracer
+ SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
+
+ // Number of sampled spans finished by this tracer
+ SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
+
+ // Number of not-sampled spans finished by this tracer
+ SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
+
+ // Number of spans with delayed sampling finished by this tracer
+ SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
+
+ // Number of errors decoding tracing context
+ DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
+
+ // Number of spans successfully reported
+ ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
+
+ // Number of spans not reported due to a Sender failure
+ ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
+
+ // Number of spans dropped due to internal queue overflow
+ ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
+
+ // Current number of spans in the reporter queue
+ ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
+
+ // Number of times the Sampler succeeded in retrieving the sampling strategy
+ SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded in retrieving the sampling strategy"`
+
+ // Number of times the Sampler failed to retrieve sampling strategy
+ SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
+
+ // Number of times the Sampler succeeded in retrieving and updating the sampling strategy
+ SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded in retrieving and updating the sampling strategy"`
+
+ // Number of times the Sampler failed to update sampling strategy
+ SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
+
+ // Number of times baggage was successfully written or updated on spans.
+ BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
+
+ // Number of times baggage failed to write or update on spans.
+ BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
+
+ // Number of times baggage was truncated as per baggage restrictions.
+ BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
+
+ // Number of times baggage restrictions were successfully updated.
+ BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
+
+ // Number of times baggage restrictions failed to update.
+ BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
+
+ // Number of times debug spans were throttled.
+ ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
+
+ // Number of times throttler successfully updated.
+ ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
+
+ // Number of times throttler failed to update.
+ ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
+}
+
+// NewMetrics creates a new Metrics struct and initializes it.
+func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
+ m := &Metrics{}
+ // TODO the namespace "jaeger" should be configurable
+ metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
+ return m
+}
+
+// NewNullMetrics creates a new Metrics struct that won't report any metrics.
+func NewNullMetrics() *Metrics {
+ return NewMetrics(metrics.NullFactory, nil)
+}
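+
+// A construction sketch (illustrative): wiring Metrics to a concrete factory.
+// NullFactory keeps the snippet dependency-free; real code would pass e.g. a
+// Prometheus- or expvar-backed factory from jaeger-lib.
+//
+//	m := NewMetrics(metrics.NullFactory, map[string]string{"lib": "jaeger-go"})
+//	m.TracesStartedSampled.Inc(1)   // counters are usable immediately
+//	m.ReporterQueueLength.Update(0) // as are gauges
+//
+// NewNullMetrics() is the shorthand when all metrics should be discarded.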
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
new file mode 100644
index 0000000..7bbd028
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/observer.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with the Tracer to receive notifications about
+// new Spans.
+//
+// Deprecated: use jaeger.ContribObserver instead.
+type Observer interface {
+ OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+//
+// Deprecated: use jaeger.ContribSpanObserver instead.
+type SpanObserver interface {
+ OnSetOperationName(operationName string)
+ OnSetTag(key string, value interface{})
+ OnFinish(options opentracing.FinishOptions)
+}
+
+// compositeObserver is a dispatcher to other observers
+type compositeObserver struct {
+ observers []ContribObserver
+}
+
+// compositeSpanObserver is a dispatcher to other span observers
+type compositeSpanObserver struct {
+ observers []ContribSpanObserver
+}
+
+// noopSpanObserver is used when there are no observers registered
+// on the Tracer or none of them returns span observers from OnStartSpan.
+var noopSpanObserver = &compositeSpanObserver{}
+
+func (o *compositeObserver) append(contribObserver ContribObserver) {
+ o.observers = append(o.observers, contribObserver)
+}
+
+func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
+ var spanObservers []ContribSpanObserver
+ for _, obs := range o.observers {
+ spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+ if ok {
+ if spanObservers == nil {
+ spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
+ }
+ spanObservers = append(spanObservers, spanObs)
+ }
+ }
+ if len(spanObservers) == 0 {
+ return noopSpanObserver
+ }
+ return &compositeSpanObserver{observers: spanObservers}
+}
+
+func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
+ for _, obs := range o.observers {
+ obs.OnSetOperationName(operationName)
+ }
+}
+
+func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
+ for _, obs := range o.observers {
+ obs.OnSetTag(key, value)
+ }
+}
+
+func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
+ for _, obs := range o.observers {
+ obs.OnFinish(options)
+ }
+}
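+
+// A ContribObserver sketch (illustrative; the interface itself is declared in
+// contrib_observer.go, and this snippet assumes the sync/atomic package):
+// an observer that merely counts started and finished spans.
+//
+//	type countingObserver struct{ started, finished int64 }
+//
+//	func (c *countingObserver) OnStartSpan(sp opentracing.Span, op string, opts opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+//		atomic.AddInt64(&c.started, 1)
+//		return &countingSpanObserver{c: c}, true
+//	}
+//
+//	type countingSpanObserver struct{ c *countingObserver }
+//
+//	func (s *countingSpanObserver) OnSetOperationName(string)          {}
+//	func (s *countingSpanObserver) OnSetTag(string, interface{})       {}
+//	func (s *countingSpanObserver) OnFinish(opentracing.FinishOptions) { atomic.AddInt64(&s.c.finished, 1) }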
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
new file mode 100644
index 0000000..30cbf99
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/process.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// Process holds process-specific metadata that's relevant to this client.
+type Process struct {
+ Service string
+ UUID string
+ Tags []Tag
+}
+
+// ProcessSetter sets a process. This can be used by any class that requires
+// the process to be set as part of initialization.
+// See internal/throttler/remote/throttler.go for an example.
+type ProcessSetter interface {
+ SetProcess(process Process)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
new file mode 100644
index 0000000..e06459b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -0,0 +1,325 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "log"
+ "net/url"
+ "strings"
+ "sync"
+
+ opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Injector is responsible for injecting SpanContext instances in a manner suitable
+// for propagation via a format-specific "carrier" object. Typically the
+// injection will take place across an RPC boundary, but message queues and
+// other IPC mechanisms are also reasonable places to use an Injector.
+type Injector interface {
+ // Inject takes `SpanContext` and injects it into `carrier`. The actual type
+ // of `carrier` depends on the `format` passed to `Tracer.Inject()`.
+ //
+ // Implementations may return opentracing.ErrInvalidCarrier or any other
+ // implementation-specific error if injection fails.
+ Inject(ctx SpanContext, carrier interface{}) error
+}
+
+// Extractor is responsible for extracting SpanContext instances from a
+// format-specific "carrier" object. Typically the extraction will take place
+// on the server side of an RPC boundary, but message queues and other IPC
+// mechanisms are also reasonable places to use an Extractor.
+type Extractor interface {
+ // Extract decodes a SpanContext instance from the given `carrier`,
+ // or (nil, opentracing.ErrSpanContextNotFound) if no context could
+ // be found in the `carrier`.
+ Extract(carrier interface{}) (SpanContext, error)
+}
+
+// TextMapPropagator is a combined Injector and Extractor for TextMap format
+type TextMapPropagator struct {
+ headerKeys *HeadersConfig
+ metrics Metrics
+ encodeValue func(string) string
+ decodeValue func(string) string
+}
+
+// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
+func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+ return &TextMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return val
+ },
+ decodeValue: func(val string) string {
+ return val
+ },
+ }
+}
+
+// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
+func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+ return &TextMapPropagator{
+ headerKeys: headerKeys,
+ metrics: metrics,
+ encodeValue: func(val string) string {
+ return url.QueryEscape(val)
+ },
+ decodeValue: func(val string) string {
+ // ignore decoding errors, cannot do anything about them
+ if v, err := url.QueryUnescape(val); err == nil {
+ return v
+ }
+ return val
+ },
+ }
+}
+
+// BinaryPropagator is a combined Injector and Extractor for Binary format
+type BinaryPropagator struct {
+ tracer *Tracer
+ buffers sync.Pool
+}
+
+// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
+func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
+ return &BinaryPropagator{
+ tracer: tracer,
+ buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
+ }
+}
+
+// Inject implements Injector of TextMapPropagator
+func (p *TextMapPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Do not encode the string with trace context to avoid accidental double-encoding
+ // if people are using opentracing < 0.10.0. Our colon-separated representation
+ // of the trace context is already safe for HTTP headers.
+ textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
+ for k, v := range sc.baggage {
+ safeKey := p.addBaggageKeyPrefix(k)
+ safeVal := p.encodeValue(v)
+ textMapWriter.Set(safeKey, safeVal)
+ }
+ return nil
+}
+
+// Extract implements Extractor of TextMapPropagator
+func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+ var baggage map[string]string
+ err := textMapReader.ForeachKey(func(rawKey, value string) error {
+ key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
+ if key == p.headerKeys.TraceContextHeaderName {
+ var err error
+ safeVal := p.decodeValue(value)
+ if ctx, err = ContextFromString(safeVal); err != nil {
+ return err
+ }
+ } else if key == p.headerKeys.JaegerDebugHeader {
+ ctx.debugID = p.decodeValue(value)
+ } else if key == p.headerKeys.JaegerBaggageHeader {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ for k, v := range p.parseCommaSeparatedMap(value) {
+ baggage[k] = v
+ }
+ } else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
+ if baggage == nil {
+ baggage = make(map[string]string)
+ }
+ safeKey := p.removeBaggageKeyPrefix(key)
+ safeVal := p.decodeValue(value)
+ baggage[safeKey] = safeVal
+ }
+ return nil
+ })
+ if err != nil {
+ p.metrics.DecodingErrors.Inc(1)
+ return emptyContext, err
+ }
+ if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ ctx.baggage = baggage
+ return ctx, nil
+}
+
+// Inject implements Injector of BinaryPropagator
+func (p *BinaryPropagator) Inject(
+ sc SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(io.Writer)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ // Handle the tracer context
+ if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
+ return err
+ }
+
+ // Handle the baggage items
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
+ return err
+ }
+ for k, v := range sc.baggage {
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(carrier, k); err != nil {
+ return err
+ }
+ if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
+ return err
+ }
+ if _, err := io.WriteString(carrier, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits
+const (
+ maxBinaryBaggage = 180
+ maxBinaryNameValueLen = 4096
+)
+
+// Extract implements Extractor of BinaryPropagator
+func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(io.Reader)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ var ctx SpanContext
+ ctx.samplingState = &samplingState{}
+
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+
+ var flags byte
+ if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ ctx.samplingState.setFlags(flags)
+
+ // Handle the baggage items
+ var numBaggage int32
+ if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if numBaggage > maxBinaryBaggage {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
+ ctx.baggage = make(map[string]string, iNumBaggage)
+ buf := p.buffers.Get().(*bytes.Buffer)
+ defer p.buffers.Put(buf)
+
+ var keyLen, valLen int32
+ for i := 0; i < iNumBaggage; i++ {
+ if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if keyLen < 0 || keyLen > maxBinaryNameValueLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(keyLen))
+ if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ key := buf.String()
+
+ if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ if valLen < 0 || keyLen+valLen > maxBinaryNameValueLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ buf.Reset()
+ buf.Grow(int(valLen))
+ if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
+ return emptyContext, opentracing.ErrSpanContextCorrupted
+ }
+ ctx.baggage[key] = buf.String()
+ }
+ }
+
+ return ctx, nil
+}
+
+// Converts a comma separated key value pair list into a map
+// e.g. key1=value1, key2=value2, key3 = value3
+// is converted to map[string]string { "key1" : "value1",
+// "key2" : "value2",
+// "key3" : "value3" }
+func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+ baggage := make(map[string]string)
+ value, err := url.QueryUnescape(value)
+ if err != nil {
+ log.Printf("Unable to unescape %s, %v", value, err)
+ return baggage
+ }
+ for _, kvpair := range strings.Split(value, ",") {
+ kv := strings.Split(strings.TrimSpace(kvpair), "=")
+ if len(kv) == 2 {
+ baggage[strings.TrimSpace(kv[0])] = strings.TrimSpace(kv[1])
+ } else {
+ log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
+ }
+ }
+ return baggage
+}
+
+// Converts a baggage item key into an http header format,
+// by prepending TraceBaggageHeaderPrefix and encoding the key string
+func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
+ // TODO encodeBaggageKeyAsHeader add caching and escaping
+ return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
+}
+
+func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
+ // TODO decodeBaggageHeaderKey add caching and escaping
+ return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
+}
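+
+// A propagation sketch (illustrative): carrying a span context across an HTTP
+// request via the opentracing HTTPHeaders format, which routes through the
+// propagators above; tracer is any opentracing.Tracer built by this package.
+//
+//	carrier := opentracing.HTTPHeadersCarrier(outboundReq.Header)
+//	_ = tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier)
+//
+//	// on the receiving side:
+//	wireCtx, err := tracer.Extract(opentracing.HTTPHeaders,
+//		opentracing.HTTPHeadersCarrier(inboundReq.Header))
+//	if err == opentracing.ErrSpanContextNotFound {
+//		// no incoming trace; start a fresh root span instead
+//	}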
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
new file mode 100644
index 0000000..5646e78
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reference.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "github.com/opentracing/opentracing-go"
+
+// Reference represents a causal reference to other Spans (via their SpanContext).
+type Reference struct {
+ Type opentracing.SpanReferenceType
+ Context SpanContext
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
new file mode 100644
index 0000000..a71a92c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -0,0 +1,322 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/internal/reporterstats"
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
+type Reporter interface {
+ // Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+ // If the reporter processes Spans asynchronously, it needs to Retain() the span,
+ // and then Release() it when no longer needed, to avoid span data corruption.
+ Report(span *Span)
+
+ // Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
+ Close()
+}
+
+// ------------------------------
+
+type nullReporter struct{}
+
+// NewNullReporter creates a no-op reporter that ignores all reported spans.
+func NewNullReporter() Reporter {
+ return &nullReporter{}
+}
+
+// Report implements Report() method of Reporter by doing nothing.
+func (r *nullReporter) Report(span *Span) {
+ // no-op
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *nullReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+type loggingReporter struct {
+ logger Logger
+}
+
+// NewLoggingReporter creates a reporter that logs all reported spans to provided logger.
+func NewLoggingReporter(logger Logger) Reporter {
+ return &loggingReporter{logger}
+}
+
+// Report implements Report() method of Reporter by logging the span to the logger.
+func (r *loggingReporter) Report(span *Span) {
+ r.logger.Infof("Reporting span %+v", span)
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *loggingReporter) Close() {
+ // no-op
+}
+
+// ------------------------------
+
+// InMemoryReporter is used for testing, and simply collects spans in memory.
+type InMemoryReporter struct {
+ spans []opentracing.Span
+ lock sync.Mutex
+}
+
+// NewInMemoryReporter creates a reporter that stores spans in memory.
+// NOTE: the Tracer should be created with options.PoolSpans = false.
+func NewInMemoryReporter() *InMemoryReporter {
+ return &InMemoryReporter{
+ spans: make([]opentracing.Span, 0, 10),
+ }
+}
+
+// Report implements Report() method of Reporter by storing the span in the buffer.
+func (r *InMemoryReporter) Report(span *Span) {
+ r.lock.Lock()
+ // Need to retain the span otherwise it will be released
+ r.spans = append(r.spans, span.Retain())
+ r.lock.Unlock()
+}
+
+// Close implements Close() method of Reporter
+func (r *InMemoryReporter) Close() {
+ r.Reset()
+}
+
+// SpansSubmitted returns the number of spans accumulated in the buffer.
+func (r *InMemoryReporter) SpansSubmitted() int {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ return len(r.spans)
+}
+
+// GetSpans returns accumulated spans as a copy of the buffer.
+func (r *InMemoryReporter) GetSpans() []opentracing.Span {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+ copied := make([]opentracing.Span, len(r.spans))
+ copy(copied, r.spans)
+ return copied
+}
+
+// Reset clears all accumulated spans.
+func (r *InMemoryReporter) Reset() {
+ r.lock.Lock()
+ defer r.lock.Unlock()
+
+ // Before resetting the collection, release the Span memory
+ for _, span := range r.spans {
+ span.(*Span).Release()
+ }
+ r.spans = r.spans[:0]
+}
+
+// ------------------------------
+
+type compositeReporter struct {
+ reporters []Reporter
+}
+
+// NewCompositeReporter creates a reporter that broadcasts all reported spans to the given set of reporters.
+func NewCompositeReporter(reporters ...Reporter) Reporter {
+ return &compositeReporter{reporters: reporters}
+}
+
+// Report implements Report() method of Reporter by delegating to each underlying reporter.
+func (r *compositeReporter) Report(span *Span) {
+ for _, reporter := range r.reporters {
+ reporter.Report(span)
+ }
+}
+
+// Close implements Close() method of Reporter by closing each underlying reporter.
+func (r *compositeReporter) Close() {
+ for _, reporter := range r.reporters {
+ reporter.Close()
+ }
+}
+
+// ------------- REMOTE REPORTER -----------------
+
+type reporterQueueItemType int
+
+const (
+ defaultQueueSize = 100
+ defaultBufferFlushInterval = 1 * time.Second
+
+ reporterQueueItemSpan reporterQueueItemType = iota
+ reporterQueueItemClose
+)
+
+type reporterQueueItem struct {
+ itemType reporterQueueItemType
+ span *Span
+ close *sync.WaitGroup
+}
+
+// reporterStats implements reporterstats.ReporterStats.
+type reporterStats struct {
+ droppedCount int64 // provided to Transports to report data loss to the backend
+}
+
+// SpansDroppedFromQueue implements reporterstats.ReporterStats.
+func (r *reporterStats) SpansDroppedFromQueue() int64 {
+ return atomic.LoadInt64(&r.droppedCount)
+}
+
+func (r *reporterStats) incDroppedCount() {
+ atomic.AddInt64(&r.droppedCount, 1)
+}
+
+type remoteReporter struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ queueLength int64 // used to update metrics.Gauge
+ closed int64 // 0 - not closed, 1 - closed
+
+ reporterOptions
+
+ sender Transport
+ queue chan reporterQueueItem
+ reporterStats *reporterStats
+}
+
+// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
+// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
+// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
+// Calls to Close() block until all spans reported prior to the call to Close are flushed.
+func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
+ options := reporterOptions{}
+ for _, option := range opts {
+ option(&options)
+ }
+ if options.bufferFlushInterval <= 0 {
+ options.bufferFlushInterval = defaultBufferFlushInterval
+ }
+ if options.logger == nil {
+ options.logger = log.NullLogger
+ }
+ if options.metrics == nil {
+ options.metrics = NewNullMetrics()
+ }
+ if options.queueSize <= 0 {
+ options.queueSize = defaultQueueSize
+ }
+ reporter := &remoteReporter{
+ reporterOptions: options,
+ sender: sender,
+ queue: make(chan reporterQueueItem, options.queueSize),
+ reporterStats: new(reporterStats),
+ }
+ if receiver, ok := sender.(reporterstats.Receiver); ok {
+ receiver.SetReporterStats(reporter.reporterStats)
+ }
+ go reporter.processQueue()
+ return reporter
+}
+
+// Report implements Report() method of Reporter.
+// It passes the span to a background go-routine for submission to Jaeger backend.
+// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
+// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
+// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
+// because some of them may still be successfully added to the queue.
+func (r *remoteReporter) Report(span *Span) {
+ select {
+ // Need to retain the span otherwise it will be released
+ case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
+ atomic.AddInt64(&r.queueLength, 1)
+ default:
+ r.metrics.ReporterDropped.Inc(1)
+ r.reporterStats.incDroppedCount()
+ }
+}
+
+// Close implements Close() method of Reporter by waiting for the queue to be drained.
+func (r *remoteReporter) Close() {
+ r.logger.Debugf("closing reporter")
+ if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
+ r.logger.Error("Repeated attempt to close the reporter is ignored")
+ return
+ }
+ r.sendCloseEvent()
+ _ = r.sender.Close()
+}
+
+func (r *remoteReporter) sendCloseEvent() {
+ wg := &sync.WaitGroup{}
+ wg.Add(1)
+ item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
+
+ r.queue <- item // if the queue is full we will block until there is space
+ atomic.AddInt64(&r.queueLength, 1)
+ wg.Wait()
+}
+
+// processQueue reads spans from the queue, converts them to Thrift, and stores them in an internal buffer.
+// When the buffer length reaches batchSize, it is flushed by submitting the accumulated spans to Jaeger.
+// The buffer also gets flushed automatically every bufferFlushInterval, in case the tracer stopped
+// reporting new spans.
+func (r *remoteReporter) processQueue() {
+ // flush causes the Sender to flush its accumulated spans and clear the buffer
+ flush := func() {
+ if flushed, err := r.sender.Flush(); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ }
+ }
+
+ timer := time.NewTicker(r.bufferFlushInterval)
+ for {
+ select {
+ case <-timer.C:
+ flush()
+ case item := <-r.queue:
+ atomic.AddInt64(&r.queueLength, -1)
+ switch item.itemType {
+ case reporterQueueItemSpan:
+ span := item.span
+ if flushed, err := r.sender.Append(span); err != nil {
+ r.metrics.ReporterFailure.Inc(int64(flushed))
+ r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
+ } else if flushed > 0 {
+ r.metrics.ReporterSuccess.Inc(int64(flushed))
+ // to reduce the number of gauge stats, we only emit queue length on flush
+ r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
+ r.logger.Debugf("flushed %d spans", flushed)
+ }
+ span.Release()
+ case reporterQueueItemClose:
+ timer.Stop()
+ flush()
+ item.close.Done()
+ return
+ }
+ }
+ }
+}
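+
+// A wiring sketch (illustrative): a remote reporter combined with a logging
+// reporter for local debugging; construction of transport (any Transport
+// implementation, e.g. a UDP sender) is elided.
+//
+//	reporter := NewCompositeReporter(
+//		NewLoggingReporter(log.StdLogger),
+//		NewRemoteReporter(transport),
+//	)
+//	defer reporter.Close() // blocks until previously reported spans are flushed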
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
new file mode 100644
index 0000000..2fc0305
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// ReporterOption is a function that sets some option on the reporter.
+type ReporterOption func(c *reporterOptions)
+
+// ReporterOptions is a factory for all available ReporterOption functions
+var ReporterOptions reporterOptions
+
+// reporterOptions control behavior of the reporter.
+type reporterOptions struct {
+ // queueSize is the size of internal queue where reported spans are stored before they are processed in the background
+ queueSize int
+ // bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
+ bufferFlushInterval time.Duration
+ // logger is used to log errors of span submissions
+ logger log.DebugLogger
+ // metrics is used to record runtime stats
+ metrics *Metrics
+}
+
+// QueueSize creates a ReporterOption that sets the size of the internal queue where
+// spans are stored before they are processed.
+func (reporterOptions) QueueSize(queueSize int) ReporterOption {
+ return func(r *reporterOptions) {
+ r.queueSize = queueSize
+ }
+}
+
+// Metrics creates a ReporterOption that initializes Metrics in the reporter,
+// which is used to record runtime statistics.
+func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
+ return func(r *reporterOptions) {
+ r.metrics = metrics
+ }
+}
+
+// BufferFlushInterval creates a ReporterOption that sets how often the queue
+// is force-flushed.
+func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
+ return func(r *reporterOptions) {
+ r.bufferFlushInterval = bufferFlushInterval
+ }
+}
+
+// Logger creates a ReporterOption that initializes the logger used to log
+// errors of span submissions.
+func (reporterOptions) Logger(logger Logger) ReporterOption {
+ return func(r *reporterOptions) {
+ r.logger = log.DebugLogAdapter(logger)
+ }
+}
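+
+// A usage sketch (illustrative): tuning the remote reporter through this
+// options factory; transport construction is elided.
+//
+//	reporter := NewRemoteReporter(
+//		transport,
+//		ReporterOptions.QueueSize(1000),
+//		ReporterOptions.BufferFlushInterval(500*time.Millisecond),
+//		ReporterOptions.Logger(StdLogger),
+//	)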
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
new file mode 100644
index 0000000..879948e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
@@ -0,0 +1,5 @@
+An Observer that can be used to emit RPC metrics
+================================================
+
+It can be attached to the tracer during tracer construction.
+See `ExampleObserver` function in [observer_test.go](./observer_test.go).
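+
+A minimal construction sketch (illustrative; exact option names may vary by
+client version, and the snippet assumes the `jaeger`, `rpcmetrics`, and
+jaeger-lib `prometheus` packages are imported):
+
+```go
+metricsFactory := prometheus.New() // any jaeger-lib metrics.Factory works
+observer := rpcmetrics.NewObserver(metricsFactory, rpcmetrics.DefaultNameNormalizer)
+tracer, closer := jaeger.NewTracer(
+    "my-service",
+    jaeger.NewConstSampler(true),
+    jaeger.NewNullReporter(),
+    jaeger.TracerOptions.Observer(observer),
+)
+defer closer.Close()
+```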
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
new file mode 100644
index 0000000..51aa11b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
+package rpcmetrics
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
new file mode 100644
index 0000000..3055524
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import "sync"
+
+// normalizedEndpoints is a cache for endpointName -> safeName mappings.
+type normalizedEndpoints struct {
+ names map[string]string
+ maxSize int
+ defaultName string
+ normalizer NameNormalizer
+ mux sync.RWMutex
+}
+
+func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
+ return &normalizedEndpoints{
+ maxSize: maxSize,
+ normalizer: normalizer,
+ names: make(map[string]string, maxSize),
+ }
+}
+
+// normalize looks up the name in the cache; if not found, it uses the
+// normalizer to convert the name to a safe name. Once called with more than
+// maxSize unique names, it returns "" for any name not already cached.
+func (n *normalizedEndpoints) normalize(name string) string {
+ n.mux.RLock()
+ norm, ok := n.names[name]
+ l := len(n.names)
+ n.mux.RUnlock()
+ if ok {
+ return norm
+ }
+ if l >= n.maxSize {
+ return ""
+ }
+ return n.normalizeWithLock(name)
+}
+
+func (n *normalizedEndpoints) normalizeWithLock(name string) string {
+ norm := n.normalizer.Normalize(name)
+ n.mux.Lock()
+ defer n.mux.Unlock()
+ // cache may have grown while we were not holding the lock
+ if len(n.names) >= n.maxSize {
+ return ""
+ }
+ n.names[name] = norm
+ return norm
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
new file mode 100644
index 0000000..a8cec2f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "sync"
+
+ "github.com/uber/jaeger-lib/metrics"
+)
+
+const (
+ otherEndpointsPlaceholder = "other"
+ endpointNameMetricTag = "endpoint"
+)
+
+// Metrics is a collection of metrics for an endpoint describing
+// throughput, success, errors, and performance.
+type Metrics struct {
+ // RequestCountSuccess is a counter of the total number of successes.
+ RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
+
+ // RequestCountFailures is a counter of the number of times any failure has been observed.
+ RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
+
+ // RequestLatencySuccess is a latency histogram of successful requests.
+ RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
+
+ // RequestLatencyFailures is a latency histogram of failed requests.
+ RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
+
+ // HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
+ HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
+
+ // HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
+ HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
+
+ // HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
+ HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
+
+ // HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
+ HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
+}
+
+func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
+ if statusCode >= 200 && statusCode < 300 {
+ m.HTTPStatusCode2xx.Inc(1)
+ } else if statusCode >= 300 && statusCode < 400 {
+ m.HTTPStatusCode3xx.Inc(1)
+ } else if statusCode >= 400 && statusCode < 500 {
+ m.HTTPStatusCode4xx.Inc(1)
+ } else if statusCode >= 500 && statusCode < 600 {
+ m.HTTPStatusCode5xx.Inc(1)
+ }
+}
+
+// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
+// Only maxNumberOfEndpoints Metrics are stored; all other endpoint names are mapped
+// to the generic endpoint name "other".
+type MetricsByEndpoint struct {
+ metricsFactory metrics.Factory
+ endpoints *normalizedEndpoints
+ metricsByEndpoint map[string]*Metrics
+ mux sync.RWMutex
+}
+
+func newMetricsByEndpoint(
+ metricsFactory metrics.Factory,
+ normalizer NameNormalizer,
+ maxNumberOfEndpoints int,
+) *MetricsByEndpoint {
+ return &MetricsByEndpoint{
+ metricsFactory: metricsFactory,
+ endpoints: newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
+ metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
+ }
+}
+
+func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
+ safeName := m.endpoints.normalize(endpoint)
+ if safeName == "" {
+ safeName = otherEndpointsPlaceholder
+ }
+ m.mux.RLock()
+ met := m.metricsByEndpoint[safeName]
+ m.mux.RUnlock()
+ if met != nil {
+ return met
+ }
+
+ return m.getWithWriteLock(safeName)
+}
+
+// split out to make it easier to test
+func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ // it is possible that the name was registered after we released
+ // the read lock and before we grabbed the write lock, so check for that.
+ if met, ok := m.metricsByEndpoint[safeName]; ok {
+ return met
+ }
+
+ // it would be nice to create the struct before locking, since Init() is somewhat
+ // expensive; however, some metrics backends (e.g. expvar) may not like duplicate metrics.
+ met := &Metrics{}
+ tags := map[string]string{endpointNameMetricTag: safeName}
+ metrics.Init(met, m.metricsFactory, tags)
+
+ m.metricsByEndpoint[safeName] = met
+ return met
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
new file mode 100644
index 0000000..148d84b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+// NameNormalizer is used to convert the endpoint names to strings
+// that can be safely used as tags in the metrics.
+type NameNormalizer interface {
+ Normalize(name string) string
+}
+
+// DefaultNameNormalizer converts endpoint names so that they contain only characters
+// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
+var DefaultNameNormalizer = &SimpleNameNormalizer{
+ SafeSets: []SafeCharacterSet{
+ &Range{From: 'a', To: 'z'},
+ &Range{From: 'A', To: 'Z'},
+ &Range{From: '0', To: '9'},
+ &Char{'-'},
+ &Char{'_'},
+ &Char{'/'},
+ &Char{'.'},
+ },
+ Replacement: '-',
+}
+
+// SimpleNameNormalizer uses a set of safe character sets.
+type SimpleNameNormalizer struct {
+ SafeSets []SafeCharacterSet
+ Replacement byte
+}
+
+// SafeCharacterSet determines if the given character is "safe"
+type SafeCharacterSet interface {
+ IsSafe(c byte) bool
+}
+
+// Range implements SafeCharacterSet
+type Range struct {
+ From, To byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (r *Range) IsSafe(c byte) bool {
+ return c >= r.From && c <= r.To
+}
+
+// Char implements SafeCharacterSet
+type Char struct {
+ Val byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (ch *Char) IsSafe(c byte) bool {
+ return c == ch.Val
+}
+
+// Normalize checks each character in the string against SafeSets,
+// and if it's not safe substitutes it with Replacement.
+func (n *SimpleNameNormalizer) Normalize(name string) string {
+ var retMe []byte
+ nameBytes := []byte(name)
+ for i, b := range nameBytes {
+ if n.safeByte(b) {
+ if retMe != nil {
+ retMe[i] = b
+ }
+ } else {
+ if retMe == nil {
+ retMe = make([]byte, len(nameBytes))
+ copy(retMe[0:i], nameBytes[0:i])
+ }
+ retMe[i] = n.Replacement
+ }
+ }
+ if retMe == nil {
+ return name
+ }
+ return string(retMe)
+}
+
+// safeByte checks b against all safe charsets.
+func (n *SimpleNameNormalizer) safeByte(b byte) bool {
+ for i := range n.SafeSets {
+ if n.SafeSets[i].IsSafe(b) {
+ return true
+ }
+ }
+ return false
+}
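+
+// A behavior sketch (illustrative):
+//
+//	DefaultNameNormalizer.Normalize("GET /api/users?id=1")
+//	// => "GET-/api/users-id-1" (space, '?' and '=' fall outside the safe set)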
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
new file mode 100644
index 0000000..eca5ff6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/uber/jaeger-lib/metrics"
+
+ jaeger "github.com/uber/jaeger-client-go"
+)
+
+const defaultMaxNumberOfEndpoints = 200
+
+// Observer is an observer that can emit RPC metrics.
+type Observer struct {
+ metricsByEndpoint *MetricsByEndpoint
+}
+
+// NewObserver creates a new observer that can emit RPC metrics.
+func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
+ return &Observer{
+ metricsByEndpoint: newMetricsByEndpoint(
+ metricsFactory,
+ normalizer,
+ defaultMaxNumberOfEndpoints,
+ ),
+ }
+}
+
+// OnStartSpan creates a new SpanObserver for the span.
+func (o *Observer) OnStartSpan(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) jaeger.SpanObserver {
+ return NewSpanObserver(o.metricsByEndpoint, operationName, options)
+}
+
+// SpanKind identifies the span as inbound, outbound, or internal
+type SpanKind int
+
+const (
+ // Local span kind
+ Local SpanKind = iota
+ // Inbound span kind
+ Inbound
+ // Outbound span kind
+ Outbound
+)
+
+// SpanObserver collects RPC metrics
+type SpanObserver struct {
+ metricsByEndpoint *MetricsByEndpoint
+ operationName string
+ startTime time.Time
+ mux sync.Mutex
+ kind SpanKind
+ httpStatusCode uint16
+ err bool
+}
+
+// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
+func NewSpanObserver(
+ metricsByEndpoint *MetricsByEndpoint,
+ operationName string,
+ options opentracing.StartSpanOptions,
+) *SpanObserver {
+ so := &SpanObserver{
+ metricsByEndpoint: metricsByEndpoint,
+ operationName: operationName,
+ startTime: options.StartTime,
+ }
+ for k, v := range options.Tags {
+ so.handleTagInLock(k, v)
+ }
+ return so
+}
+
+// handleTagInLock watches for special tags:
+// - SpanKind
+// - HTTPStatusCode
+// - Error
+func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
+ if key == string(ext.SpanKind) {
+ if v, ok := value.(ext.SpanKindEnum); ok {
+ value = string(v)
+ }
+ if v, ok := value.(string); ok {
+ if v == string(ext.SpanKindRPCClientEnum) {
+ so.kind = Outbound
+ } else if v == string(ext.SpanKindRPCServerEnum) {
+ so.kind = Inbound
+ }
+ }
+ return
+ }
+ if key == string(ext.HTTPStatusCode) {
+ if v, ok := value.(uint16); ok {
+ so.httpStatusCode = v
+ } else if v, ok := value.(int); ok {
+ so.httpStatusCode = uint16(v)
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.Atoi(v); err == nil {
+ so.httpStatusCode = uint16(vv)
+ }
+ }
+ return
+ }
+ if key == string(ext.Error) {
+ if v, ok := value.(bool); ok {
+ so.err = v
+ } else if v, ok := value.(string); ok {
+ if vv, err := strconv.ParseBool(v); err == nil {
+ so.err = vv
+ }
+ }
+ return
+ }
+}
+
+// OnFinish emits the RPC metrics. It only has an effect when operation name
+// is not blank, and the span kind is an RPC server.
+func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
+ so.mux.Lock()
+ defer so.mux.Unlock()
+
+ if so.operationName == "" || so.kind != Inbound {
+ return
+ }
+
+ mets := so.metricsByEndpoint.get(so.operationName)
+ latency := options.FinishTime.Sub(so.startTime)
+ if so.err {
+ mets.RequestCountFailures.Inc(1)
+ mets.RequestLatencyFailures.Record(latency)
+ } else {
+ mets.RequestCountSuccess.Inc(1)
+ mets.RequestLatencySuccess.Record(latency)
+ }
+ mets.recordHTTPStatusCode(so.httpStatusCode)
+}
+
+// OnSetOperationName records new operation name.
+func (so *SpanObserver) OnSetOperationName(operationName string) {
+ so.mux.Lock()
+ so.operationName = operationName
+ so.mux.Unlock()
+}
+
+// OnSetTag implements SpanObserver
+func (so *SpanObserver) OnSetTag(key string, value interface{}) {
+ so.mux.Lock()
+ so.handleTagInLock(key, value)
+ so.mux.Unlock()
+}
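+
+// A tagging sketch (illustrative): metrics are emitted only for spans whose
+// span.kind tag marks them as RPC servers, so handlers typically set the
+// standard opentracing ext tags before finishing the span.
+//
+//	span := tracer.StartSpan("GET /users", ext.RPCServerOption(wireCtx))
+//	ext.HTTPStatusCode.Set(span, 200)
+//	span.Finish() // OnFinish records latency and status-code counters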
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
new file mode 100644
index 0000000..d0be8ad
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -0,0 +1,516 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "math"
+ "strings"
+ "sync"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ defaultMaxOperations = 2000
+)
+
+// Sampler decides whether a new trace should be sampled or not.
+type Sampler interface {
+ // IsSampled decides whether a trace with given `id` and `operation`
+ // should be sampled. This function will also return the tags that
+ // can be used to identify the type of sampling that was applied to
+ // the root span. Most simple samplers would return two tags,
+ // sampler.type and sampler.param, similar to those used in the Configuration
+ IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // go-routines it may have started.
+ Close()
+
+ // Equal checks if the `other` sampler is functionally equivalent
+ // to this sampler.
+ // TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
+ Equal(other Sampler) bool
+}
+
+// -----------------------
+
+// ConstSampler is a sampler that always makes the same decision.
+type ConstSampler struct {
+ legacySamplerV1Base
+ Decision bool
+ tags []Tag
+}
+
+// NewConstSampler creates a ConstSampler.
+func NewConstSampler(sample bool) *ConstSampler {
+ tags := []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeConst},
+ {key: SamplerParamTagKey, value: sample},
+ }
+ s := &ConstSampler{
+ Decision: sample,
+ tags: tags,
+ }
+ s.delegate = s.IsSampled
+ return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.Decision, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ConstSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ConstSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ConstSampler); ok {
+ return s.Decision == o.Decision
+ }
+ return false
+}
+
+// String is used to log sampler details.
+func (s *ConstSampler) String() string {
+ return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
+}
+
+// -----------------------
+
+// ProbabilisticSampler is a sampler that randomly samples a certain percentage
+// of traces.
+type ProbabilisticSampler struct {
+ legacySamplerV1Base
+ samplingRate float64
+ samplingBoundary uint64
+ tags []Tag
+}
+
+const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
+
+// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
+// samplingRate, in the range between 0.0 and 1.0.
+//
+// It relies on the fact that new trace IDs are themselves 63-bit random numbers, thus making the sampling decision
+// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
+// TODO remove the error from this function for next major release
+func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ return newProbabilisticSampler(samplingRate), nil
+}
+
+func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
+ s := new(ProbabilisticSampler)
+ s.delegate = s.IsSampled
+ return s.init(samplingRate)
+}
+
+func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
+ s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+ s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
+ s.tags = []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
+ {key: SamplerParamTagKey, value: s.samplingRate},
+ }
+ return s
+}
+
+// SamplingRate returns the sampling probability this sampler was constructed with.
+func (s *ProbabilisticSampler) SamplingRate() float64 {
+ return s.samplingRate
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ProbabilisticSampler) Close() {
+ // nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ProbabilisticSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*ProbabilisticSampler); ok {
+ return s.samplingBoundary == o.samplingBoundary
+ }
+ return false
+}
+
+// Update modifies in-place the sampling rate. Locking must be done externally.
+func (s *ProbabilisticSampler) Update(samplingRate float64) error {
+ if samplingRate < 0.0 || samplingRate > 1.0 {
+ return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+ }
+ s.init(samplingRate)
+ return nil
+}
+
+// String is used to log sampler details.
+func (s *ProbabilisticSampler) String() string {
+ return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
+}
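+
+// Example (illustrative): with samplingRate = 0.5 the boundary is
+// uint64(0.5 * (2^63 - 1)), so roughly half of all 63-bit trace IDs fall
+// at or below it and are sampled:
+//
+//	s, _ := NewProbabilisticSampler(0.5)
+//	sampled, _ := s.IsSampled(TraceID{Low: 1}, "op") // sampled == true, since 1 < boundary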
+
+// -----------------------
+
+// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
+// the burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
+// number of sequential requests can be sampled each second.
+type RateLimitingSampler struct {
+ legacySamplerV1Base
+ maxTracesPerSecond float64
+ rateLimiter *utils.ReconfigurableRateLimiter
+ tags []Tag
+}
+
+// NewRateLimitingSampler creates new RateLimitingSampler.
+func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
+ s := new(RateLimitingSampler)
+ s.delegate = s.IsSampled
+ return s.init(maxTracesPerSecond)
+}
+
+func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
+ if s.rateLimiter == nil {
+ s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ } else {
+ s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+ }
+ s.maxTracesPerSecond = maxTracesPerSecond
+ s.tags = []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
+ {key: SamplerParamTagKey, value: maxTracesPerSecond},
+ }
+ return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return s.rateLimiter.CheckCredit(1.0), s.tags
+}
+
+// Update reconfigures the rate limiter, while preserving its accumulated balance.
+// Locking must be done externally.
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
+ if s.maxTracesPerSecond != maxTracesPerSecond {
+ s.init(maxTracesPerSecond)
+ }
+}
+
+// Close does nothing.
+func (s *RateLimitingSampler) Close() {
+ // nothing to do
+}
+
+// Equal compares with another sampler.
+func (s *RateLimitingSampler) Equal(other Sampler) bool {
+ if o, ok := other.(*RateLimitingSampler); ok {
+ return s.maxTracesPerSecond == o.maxTracesPerSecond
+ }
+ return false
+}
+
+// String is used to log sampler details.
+func (s *RateLimitingSampler) String() string {
+ return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
+}
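+
+// Example (illustrative): a limiter allowing at most two traces per second.
+// Decisions ignore the trace ID and depend only on the accumulated credit
+// in the underlying rate limiter:
+//
+//	s := NewRateLimitingSampler(2.0)
+//	sampled, _ := s.IsSampled(TraceID{}, "op") // true until the ~2 traces/sec budget is spent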
+
+// -----------------------
+
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
+// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
+// every operation is sampled at least once in the time interval defined by the lowerBound, i.e. a lowerBound
+// of 1.0 / (60 * 10) will sample each operation at least once every 10 minutes.
+//
+// The ProbabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() returns
+// true for both samplers, the tags from the ProbabilisticSampler will be used.
+type GuaranteedThroughputProbabilisticSampler struct {
+ probabilisticSampler *ProbabilisticSampler
+ lowerBoundSampler *RateLimitingSampler
+ tags []Tag
+ samplingRate float64
+ lowerBound float64
+}
+
+// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
+// ProbabilisticSampler and RateLimitingSampler.
+func NewGuaranteedThroughputProbabilisticSampler(
+ lowerBound, samplingRate float64,
+) (*GuaranteedThroughputProbabilisticSampler, error) {
+ return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
+}
+
+func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
+ s := &GuaranteedThroughputProbabilisticSampler{
+ lowerBoundSampler: NewRateLimitingSampler(lowerBound),
+ lowerBound: lowerBound,
+ }
+ s.setProbabilisticSampler(samplingRate)
+ return s
+}
+
+func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
+ if s.probabilisticSampler == nil {
+ s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+ } else if s.samplingRate != samplingRate {
+ s.probabilisticSampler.init(samplingRate)
+ }
+ // since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
+ samplingRate = s.probabilisticSampler.SamplingRate()
+ if s.samplingRate != samplingRate || s.tags == nil {
+ s.samplingRate = s.probabilisticSampler.SamplingRate()
+ s.tags = []Tag{
+ {key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
+ {key: SamplerParamTagKey, value: s.samplingRate},
+ }
+ }
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
+ s.lowerBoundSampler.IsSampled(id, operation)
+ return true, tags
+ }
+ sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
+ return sampled, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Close() {
+ s.probabilisticSampler.Close()
+ s.lowerBoundSampler.Close()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+ // more information.
+ return false
+}
+
+// this function should only be called while holding a Write lock
+func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
+ s.setProbabilisticSampler(samplingRate)
+ if s.lowerBound != lowerBound {
+ s.lowerBoundSampler.Update(lowerBound)
+ s.lowerBound = lowerBound
+ }
+}
+
+func (s GuaranteedThroughputProbabilisticSampler) String() string {
+ return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
+}
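+
+// Example (illustrative): sample roughly 1% of traces probabilistically,
+// while guaranteeing each operation is sampled at least once per minute;
+// id stands for any TraceID:
+//
+//	s := newGuaranteedThroughputProbabilisticSampler(1.0/60.0, 0.01)
+//	sampled, tags := s.IsSampled(id, "GET /users")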
+
+// -----------------------
+
+// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
+// on a per-operation basis.
+type PerOperationSampler struct {
+ sync.RWMutex
+
+ samplers map[string]*GuaranteedThroughputProbabilisticSampler
+ defaultSampler *ProbabilisticSampler
+ lowerBound float64
+ maxOperations int
+
+ // see description in PerOperationSamplerParams
+ operationNameLateBinding bool
+}
+
+// NewAdaptiveSampler returns a new PerOperationSampler.
+// Deprecated: please use NewPerOperationSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: maxOperations,
+ Strategies: strategies,
+ }), nil
+}
+
+// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
+type PerOperationSamplerParams struct {
+	// Max number of operations that will be tracked. Other operations will be given the default strategy.
+ MaxOperations int
+
+ // Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
+ // When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
+ // the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
+ // in applications that always provide the correct span name on trace creation.
+ //
+ // For backwards compatibility this option is off by default.
+ OperationNameLateBinding bool
+
+ // Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
+ Strategies *sampling.PerOperationSamplingStrategies
+}
+
+// NewPerOperationSampler returns a new PerOperationSampler.
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
+ if params.MaxOperations <= 0 {
+ params.MaxOperations = defaultMaxOperations
+ }
+ samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
+ for _, strategy := range params.Strategies.PerOperationStrategies {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ params.Strategies.DefaultLowerBoundTracesPerSecond,
+ strategy.ProbabilisticSampling.SamplingRate,
+ )
+ samplers[strategy.Operation] = sampler
+ }
+ return &PerOperationSampler{
+ samplers: samplers,
+ defaultSampler: newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
+ lowerBound: params.Strategies.DefaultLowerBoundTracesPerSecond,
+ maxOperations: params.MaxOperations,
+ operationNameLateBinding: params.OperationNameLateBinding,
+ }
+}
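+
+// Example (illustrative, hypothetical values): constructing a
+// PerOperationSampler directly from a strategies payload:
+//
+//	s := NewPerOperationSampler(PerOperationSamplerParams{
+//	    MaxOperations: 500,
+//	    Strategies: &sampling.PerOperationSamplingStrategies{
+//	        DefaultSamplingProbability:       0.001,
+//	        DefaultLowerBoundTracesPerSecond: 1.0 / 60,
+//	    },
+//	})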
+
+// IsSampled is not used and only exists to match Sampler V1 API.
+// TODO (breaking change) remove when upgrading everything to SamplerV2
+func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
+ samplerV1 := s.getSamplerForOperation(operationName)
+ var sampled bool
+ var tags []Tag
+ if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
+ sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
+ }
+ return sampled, tags
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
+ sampled, tags := s.trySampling(span, span.OperationName())
+ return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ sampled, tags := s.trySampling(span, operationName)
+ return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
+ s.RLock()
+ sampler, ok := s.samplers[operation]
+ if ok {
+ defer s.RUnlock()
+ return sampler
+ }
+ s.RUnlock()
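+	// Upgrade to a write lock; another goroutine may create the sampler for
+	// this operation between the RUnlock and Lock calls, hence the re-check below.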
+ s.Lock()
+ defer s.Unlock()
+
+ // Check if sampler has already been created
+ sampler, ok = s.samplers[operation]
+ if ok {
+ return sampler
+ }
+ // Store only up to maxOperations of unique ops.
+ if len(s.samplers) >= s.maxOperations {
+ return s.defaultSampler
+ }
+ newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
+ s.samplers[operation] = newSampler
+ return newSampler
+}
+
+// Close invokes Close on all underlying samplers.
+func (s *PerOperationSampler) Close() {
+ s.Lock()
+ defer s.Unlock()
+ for _, sampler := range s.samplers {
+ sampler.Close()
+ }
+ s.defaultSampler.Close()
+}
+
+func (s *PerOperationSampler) String() string {
+ var sb strings.Builder
+
+ fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
+ fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
+ fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
+ fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
+ fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
+ fmt.Fprintf(&sb, "samplers=[")
+ for operationName, sampler := range s.samplers {
+ fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
+ }
+ fmt.Fprintf(&sb, "])")
+
+ return sb.String()
+}
+
+// Equal is not used.
+// TODO (breaking change) remove this in the future
+func (s *PerOperationSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
+ // samplers which all need to be initialized before this function can be called for a comparison.
+ // Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
+ // changing. Hence this function always returns false so that the update function can be called.
+ // Once the Equal() function is removed from the Sampler API, this will no longer be needed.
+ return false
+}
+
+func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+ s.Lock()
+ defer s.Unlock()
+ newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
+ for _, strategy := range strategies.PerOperationStrategies {
+ operation := strategy.Operation
+ samplingRate := strategy.ProbabilisticSampling.SamplingRate
+ lowerBound := strategies.DefaultLowerBoundTracesPerSecond
+ if sampler, ok := s.samplers[operation]; ok {
+ sampler.update(lowerBound, samplingRate)
+ newSamplers[operation] = sampler
+ } else {
+ sampler := newGuaranteedThroughputProbabilisticSampler(
+ lowerBound,
+ samplingRate,
+ )
+ newSamplers[operation] = sampler
+ }
+ }
+ s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
+ if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
+ s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
+ }
+ s.samplers = newSamplers
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
new file mode 100644
index 0000000..119f0a1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -0,0 +1,358 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+ defaultRemoteSamplingTimeout = 10 * time.Second
+ defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from the remote server.
+type SamplingStrategyFetcher interface {
+ Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+ Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from the remote config server, to the current sampler. The updater can modify
+// the sampler in-place if the sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// the updater must return modified=nil to give other updaters a chance to inspect
+// the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+ Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
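+
+// Example (illustrative): a minimal custom updater that recognizes no
+// strategy and defers to the next updater in the chain:
+//
+//	type passThroughUpdater struct{}
+//
+//	func (passThroughUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+//	    return nil, nil // not our strategy type; let other updaters inspect it
+//	}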
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+ // These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+ // Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+ closed int64 // 0 - not closed, 1 - closed
+
+ sync.RWMutex // used to serialize access to samplerOptions.sampler
+ samplerOptions
+
+ serviceName string
+ doneChan chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+ serviceName string,
+ opts ...SamplerOption,
+) *RemotelyControlledSampler {
+ options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+ sampler := &RemotelyControlledSampler{
+ samplerOptions: *options,
+ serviceName: serviceName,
+ doneChan: make(chan *sync.WaitGroup),
+ }
+ go sampler.pollController()
+ return sampler
+}
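+
+// Example (illustrative, hypothetical URL): polling a local agent once a
+// minute, starting from a conservative initial sampler:
+//
+//	sampler := NewRemotelyControlledSampler(
+//	    "my-service",
+//	    SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
+//	    SamplerOptions.SamplingRefreshInterval(time.Minute),
+//	    SamplerOptions.InitialSampler(NewConstSampler(false)),
+//	)
+//	defer sampler.Close()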
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+ return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+ if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+ s.logger.Error("Repeated attempt to close the sampler is ignored")
+ return
+ }
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+ s.doneChan <- &wg
+ wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+ // NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+ // more information.
+ return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+ ticker := time.NewTicker(s.samplingRefreshInterval)
+ defer ticker.Stop()
+ s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+ for {
+ select {
+ case <-ticker.C:
+ s.UpdateSampler()
+ case wg := <-s.doneChan:
+ wg.Done()
+ return
+ }
+ }
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+ s.RLock()
+ defer s.RUnlock()
+ return s.sampler
+}
+
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+ s.Lock()
+ defer s.Unlock()
+ s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch sampling strategy from backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+ res, err := s.samplingFetcher.Fetch(s.serviceName)
+ if err != nil {
+ s.metrics.SamplerQueryFailure.Inc(1)
+ s.logger.Infof("failed to fetch sampling strategy: %v", err)
+ return
+ }
+ strategy, err := s.samplingParser.Parse(res)
+ if err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to parse sampling strategy response: %v", err)
+ return
+ }
+
+ s.Lock()
+ defer s.Unlock()
+
+ s.metrics.SamplerRetrieved.Inc(1)
+ if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+ s.metrics.SamplerUpdateFailure.Inc(1)
+ s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+ return
+ }
+ s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+ for _, updater := range s.updaters {
+ sampler, err := updater.Update(s.sampler, strategy)
+ if err != nil {
+ return err
+ }
+ if sampler != nil {
+ s.logger.Debugf("sampler updated: %+v", sampler)
+ s.sampler = sampler
+ return nil
+ }
+ }
+ return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+ if ps, ok := sampler.(*ProbabilisticSampler); ok {
+ if err := ps.Update(probabilistic.SamplingRate); err != nil {
+ return nil, err
+ }
+ return sampler, nil
+ }
+ return newProbabilisticSampler(probabilistic.SamplingRate), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if resp, ok := strategy.(response); ok {
+ if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+ rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+ if rl, ok := sampler.(*RateLimitingSampler); ok {
+ rl.Update(rateLimit)
+ return rl, nil
+ }
+ return NewRateLimitingSampler(rateLimit), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+// Fields have the same meaning as in PerOperationSamplerParams.
+type AdaptiveSamplerUpdater struct {
+ MaxOperations int
+ OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+ type response interface {
+ GetOperationSampling() *sampling.PerOperationSamplingStrategies
+ }
+ var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+ if p, ok := strategy.(response); ok {
+ if operations := p.GetOperationSampling(); operations != nil {
+ if as, ok := sampler.(*PerOperationSampler); ok {
+ as.update(operations)
+ return as, nil
+ }
+ return NewPerOperationSampler(PerOperationSamplerParams{
+ MaxOperations: u.MaxOperations,
+ OperationNameLateBinding: u.OperationNameLateBinding,
+ Strategies: operations,
+ }), nil
+ }
+ }
+ return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+ serverURL string
+ logger log.DebugLogger
+ httpClient http.Client
+}
+
+func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher {
+ customTransport := http.DefaultTransport.(*http.Transport).Clone()
+ customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout
+
+ return &httpSamplingStrategyFetcher{
+ serverURL: serverURL,
+ logger: logger,
+ httpClient: http.Client{
+ Transport: customTransport,
+ },
+ }
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+ v := url.Values{}
+ v.Set("service", serviceName)
+ uri := f.serverURL + "?" + v.Encode()
+
+ resp, err := f.httpClient.Get(uri)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err := resp.Body.Close(); err != nil {
+ f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
+ }
+ }()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode >= 400 {
+ return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ return body, nil
+}
+
+// -----------------------
+
+type samplingStrategyParser struct{}
+
+func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
+ strategy := new(sampling.SamplingStrategyResponse)
+ if err := json.Unmarshal(response, strategy); err != nil {
+ return nil, err
+ }
+ return strategy, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
new file mode 100644
index 0000000..64b028b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// SamplerOption is a function that sets some option on the sampler
+type SamplerOption func(options *samplerOptions)
+
+// SamplerOptions is a factory for all available SamplerOption values.
+var SamplerOptions SamplerOptionsFactory
+
+// SamplerOptionsFactory is a factory for all available SamplerOption values.
+// The type acts as a namespace for the factory functions and is public to
+// make them discoverable via godoc. It is recommended to use it via the
+// global SamplerOptions variable.
+type SamplerOptionsFactory struct{}
+
+type samplerOptions struct {
+ metrics *Metrics
+ sampler SamplerV2
+ logger log.DebugLogger
+ samplingServerURL string
+ samplingRefreshInterval time.Duration
+ samplingFetcher SamplingStrategyFetcher
+ samplingParser SamplingStrategyParser
+ updaters []SamplerUpdater
+ posParams PerOperationSamplerParams
+}
+
+// Metrics creates a SamplerOption that initializes Metrics on the sampler,
+// which is used to emit statistics.
+func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
+ return func(o *samplerOptions) {
+ o.metrics = m
+ }
+}
+
+// MaxOperations creates a SamplerOption that sets the maximum number of
+// operations the sampler will keep track of.
+func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
+ return func(o *samplerOptions) {
+ o.posParams.MaxOperations = maxOperations
+ }
+}
+
+// OperationNameLateBinding creates a SamplerOption that sets the respective
+// field in the PerOperationSamplerParams.
+func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
+ return func(o *samplerOptions) {
+ o.posParams.OperationNameLateBinding = enable
+ }
+}
+
+// InitialSampler creates a SamplerOption that sets the initial sampler
+// to use before a remote sampler is created and used.
+func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
+ return func(o *samplerOptions) {
+ o.sampler = samplerV1toV2(sampler)
+ }
+}
+
+// Logger creates a SamplerOption that sets the logger used by the sampler.
+func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
+ return func(o *samplerOptions) {
+ o.logger = log.DebugLogAdapter(logger)
+ }
+}
+
+// SamplingServerURL creates a SamplerOption that sets the sampling server URL
+// of the local agent that contains the sampling strategies.
+func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingServerURL = samplingServerURL
+ }
+}
+
+// SamplingRefreshInterval creates a SamplerOption that sets how often the
+// sampler will poll the local agent for the appropriate sampling strategy.
+func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingRefreshInterval = samplingRefreshInterval
+ }
+}
+
+// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
+func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingFetcher = fetcher
+ }
+}
+
+// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
+func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
+ return func(o *samplerOptions) {
+ o.samplingParser = parser
+ }
+}
+
+// Updaters creates a SamplerOption that initializes sampler updaters.
+func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
+ return func(o *samplerOptions) {
+ o.updaters = updaters
+ }
+}
+
+func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
+ for _, option := range opts {
+ option(o)
+ }
+ if o.sampler == nil {
+ o.sampler = newProbabilisticSampler(0.001)
+ }
+ if o.logger == nil {
+ o.logger = log.NullLogger
+ }
+ if o.samplingServerURL == "" {
+ o.samplingServerURL = DefaultSamplingServerURL
+ }
+ if o.metrics == nil {
+ o.metrics = NewNullMetrics()
+ }
+ if o.samplingRefreshInterval <= 0 {
+ o.samplingRefreshInterval = defaultSamplingRefreshInterval
+ }
+ if o.samplingFetcher == nil {
+ o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger)
+ }
+ if o.samplingParser == nil {
+ o.samplingParser = new(samplingStrategyParser)
+ }
+ if o.updaters == nil {
+ o.updaters = []SamplerUpdater{
+ &AdaptiveSamplerUpdater{
+ MaxOperations: o.posParams.MaxOperations,
+ OperationNameLateBinding: o.posParams.OperationNameLateBinding,
+ },
+ new(ProbabilisticSamplerUpdater),
+ new(RateLimitingSamplerUpdater),
+ }
+ }
+ return o
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
new file mode 100644
index 0000000..a50671a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// SamplingDecision is returned by the V2 samplers.
+type SamplingDecision struct {
+ Sample bool
+ Retryable bool
+ Tags []Tag
+}
+
+// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
+// to be made at different points of the span lifecycle.
+type SamplerV2 interface {
+ OnCreateSpan(span *Span) SamplingDecision
+ OnSetOperationName(span *Span, operationName string) SamplingDecision
+ OnSetTag(span *Span, key string, value interface{}) SamplingDecision
+ OnFinishSpan(span *Span) SamplingDecision
+
+ // Close does a clean shutdown of the sampler, stopping any background
+ // go-routines it may have started.
+ Close()
+}
+
+// samplerV1toV2 wraps a legacy V1 sampler in an adapter that makes it look like a V2 sampler.
+func samplerV1toV2(s Sampler) SamplerV2 {
+ if s2, ok := s.(SamplerV2); ok {
+ return s2
+ }
+ type legacySamplerV1toV2Adapter struct {
+ legacySamplerV1Base
+ }
+ return &legacySamplerV1toV2Adapter{
+ legacySamplerV1Base: legacySamplerV1Base{
+ delegate: s.IsSampled,
+ },
+ }
+}
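+
+// Example (illustrative): wrapping a V1 sampler so it can be used where a
+// SamplerV2 is required; span stands for any *Span:
+//
+//	v2 := samplerV1toV2(NewConstSampler(true))
+//	decision := v2.OnCreateSpan(span) // decision.Sample == true for any span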
+
+// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
+// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
+// for backwards compatibility reasons.
+// TODO (breaking change) remove this in the next major release
+type SamplerV2Base struct{}
+
+// IsSampled implements IsSampled of Sampler.
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
+ return false, nil
+}
+
+// Close implements Close of Sampler.
+func (SamplerV2Base) Close() {}
+
+// Equal implements Equal of Sampler.
+func (SamplerV2Base) Equal(other Sampler) bool { return false }
+
+// legacySamplerV1Base is used as a base for simple samplers that only implement
+// the legacy IsSampled() function, whose result is not sensitive to its arguments.
+type legacySamplerV1Base struct {
+ delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
+}
+
+func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+ isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+ return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
+ return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) Close() {}
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
new file mode 100644
index 0000000..997cffd
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -0,0 +1,503 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+ "github.com/opentracing/opentracing-go/log"
+)
+
+// Span implements opentracing.Span
+type Span struct {
+	// referenceCounter is used to extend the lifetime of
+	// the object before returning it to the pool.
+ referenceCounter int32
+
+ sync.RWMutex
+
+ tracer *Tracer
+
+ // TODO: (breaking change) change to use a pointer
+ context SpanContext
+
+ // The name of the "operation" this span is an instance of.
+ // Known as a "span name" in some implementations.
+ operationName string
+
+ // firstInProcess, if true, indicates that this span is the root of the (sub)tree
+ // of spans in the current process. In other words it's true for the root spans,
+ // and the ingress spans when the process joins another trace.
+ firstInProcess bool
+
+	// startTime is the timestamp indicating when the span began, with microsecond precision.
+ startTime time.Time
+
+	// duration is the duration of the span, with microsecond precision.
+	// A zero value means the duration is unknown.
+ duration time.Duration
+
+ // tags attached to this span
+ tags []Tag
+
+ // The span's "micro-log"
+ logs []opentracing.LogRecord
+
+ // The number of logs dropped because of MaxLogsPerSpan.
+ numDroppedLogs int
+
+ // references for this span
+ references []Reference
+
+ observer ContribSpanObserver
+}
+
+// Tag is a simple key value wrapper.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+type Tag struct {
+ key string
+ value interface{}
+}
+
+// NewTag creates a new Tag.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+func NewTag(key string, value interface{}) Tag {
+ return Tag{key: key, value: value}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) opentracing.Span {
+ s.Lock()
+ s.operationName = operationName
+ ctx := s.context
+ s.Unlock()
+ if !ctx.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetOperationName(s, operationName)
+ s.applySamplingDecision(decision, true)
+ }
+ s.observer.OnSetOperationName(operationName)
+ return s
+}
+
+// SetTag implements SetTag() of opentracing.Span
+func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+ return s.setTagInternal(key, value, true)
+}
+
+func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
+ var ctx SpanContext
+ var operationName string
+ if lock {
+ ctx = s.SpanContext()
+ operationName = s.OperationName()
+ } else {
+ ctx = s.context
+ operationName = s.operationName
+ }
+
+ s.observer.OnSetTag(key, value)
+ if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) {
+ return s
+ }
+ if !ctx.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnSetTag(s, key, value)
+ s.applySamplingDecision(decision, lock)
+ }
+ if ctx.isWriteable() {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ s.appendTagNoLocking(key, value)
+ }
+ return s
+}
+
+// SpanContext returns span context
+func (s *Span) SpanContext() SpanContext {
+ s.Lock()
+ defer s.Unlock()
+ return s.context
+}
+
+// StartTime returns span start time
+func (s *Span) StartTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.startTime
+}
+
+// Duration returns span duration
+func (s *Span) Duration() time.Duration {
+ s.Lock()
+ defer s.Unlock()
+ return s.duration
+}
+
+// Tags returns tags for span
+func (s *Span) Tags() opentracing.Tags {
+ s.Lock()
+ defer s.Unlock()
+ var result = make(opentracing.Tags, len(s.tags))
+ for _, tag := range s.tags {
+ result[tag.key] = tag.value
+ }
+ return result
+}
+
+// Logs returns micro logs for span
+func (s *Span) Logs() []opentracing.LogRecord {
+ s.Lock()
+ defer s.Unlock()
+
+ logs := append([]opentracing.LogRecord(nil), s.logs...)
+ if s.numDroppedLogs != 0 {
+ fixLogs(logs, s.numDroppedLogs)
+ }
+
+ return logs
+}
+
+// References returns references for this span
+func (s *Span) References() []opentracing.SpanReference {
+ s.Lock()
+ defer s.Unlock()
+
+	if len(s.references) == 0 {
+ return nil
+ }
+
+ result := make([]opentracing.SpanReference, len(s.references))
+ for i, r := range s.references {
+ result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
+ }
+ return result
+}
+
+func (s *Span) appendTagNoLocking(key string, value interface{}) {
+ s.tags = append(s.tags, Tag{key: key, value: value})
+}
+
+// LogFields implements opentracing.Span API
+func (s *Span) LogFields(fields ...log.Field) {
+ s.Lock()
+ defer s.Unlock()
+ if !s.context.IsSampled() {
+ return
+ }
+ s.logFieldsNoLocking(fields...)
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) logFieldsNoLocking(fields ...log.Field) {
+ lr := opentracing.LogRecord{
+ Fields: fields,
+ Timestamp: time.Now(),
+ }
+ s.appendLogNoLocking(lr)
+}
+
+// LogKV implements opentracing.Span API
+func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
+ s.RLock()
+ sampled := s.context.IsSampled()
+ s.RUnlock()
+ if !sampled {
+ return
+ }
+ fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
+ if err != nil {
+ s.LogFields(log.Error(err), log.String("function", "LogKV"))
+ return
+ }
+ s.LogFields(fields...)
+}
+
+// LogEvent implements opentracing.Span API
+func (s *Span) LogEvent(event string) {
+ s.Log(opentracing.LogData{Event: event})
+}
+
+// LogEventWithPayload implements opentracing.Span API
+func (s *Span) LogEventWithPayload(event string, payload interface{}) {
+ s.Log(opentracing.LogData{Event: event, Payload: payload})
+}
+
+// Log implements opentracing.Span API
+func (s *Span) Log(ld opentracing.LogData) {
+ s.Lock()
+ defer s.Unlock()
+ if s.context.IsSampled() {
+ if ld.Timestamp.IsZero() {
+ ld.Timestamp = s.tracer.timeNow()
+ }
+ s.appendLogNoLocking(ld.ToLogRecord())
+ }
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
+ maxLogs := s.tracer.options.maxLogsPerSpan
+ if maxLogs == 0 || len(s.logs) < maxLogs {
+ s.logs = append(s.logs, lr)
+ return
+ }
+
+ // We have too many logs. We don't touch the first numOld logs; we treat the
+ // rest as a circular buffer and overwrite the oldest log among those.
+ numOld := (maxLogs - 1) / 2
+ numNew := maxLogs - numOld
+ s.logs[numOld+s.numDroppedLogs%numNew] = lr
+ s.numDroppedLogs++
+}
+
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move to
+// the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+ // This algorithm is described in:
+ // http://www.cplusplus.com/reference/algorithm/rotate
+ for first, middle, next := 0, pos, pos; first != middle; {
+ buf[first], buf[next] = buf[next], buf[first]
+ first++
+ next++
+ if next == len(buf) {
+ next = middle
+ } else if first == middle {
+ middle = next
+ }
+ }
+}
+
+func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
+ // We dropped some log events, which means that we used part of Logs as a
+ // circular buffer (see appendLog). De-circularize it.
+ numOld := (len(logs) - 1) / 2
+ numNew := len(logs) - numOld
+ rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
+
+ // Replace the log in the middle (the oldest "new" log) with information
+ // about the dropped logs. This means that we are effectively dropping one
+ // more "new" log.
+ numDropped := numDroppedLogs + 1
+ logs[numOld] = opentracing.LogRecord{
+ // Keep the timestamp of the last dropped event.
+ Timestamp: logs[numOld].Timestamp,
+ Fields: []log.Field{
+ log.String("event", "dropped Span logs"),
+ log.Int("dropped_log_count", numDropped),
+ log.String("component", "jaeger-client"),
+ },
+ }
+}
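+
+// Worked example (illustrative): with maxLogsPerSpan = 5 and ten records
+// lr0..lr9 appended, numOld = 2 and numNew = 3, so logs[2:] acts as a circular
+// buffer. After appending, logs = [lr0 lr1 lr8 lr9 lr7] with numDroppedLogs = 5.
+// fixLogs rotates logs[2:] by 5%3 = 2, yielding [lr0 lr1 lr7 lr8 lr9], and then
+// overwrites logs[2] with a "dropped Span logs" record reporting 6 dropped
+// events (the five overwritten records plus lr7, displaced by the notice).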
+
+func (s *Span) fixLogsIfDropped() {
+ if s.numDroppedLogs == 0 {
+ return
+ }
+ fixLogs(s.logs, s.numDroppedLogs)
+ s.numDroppedLogs = 0
+}
+
+// SetBaggageItem implements SetBaggageItem() of opentracing.Span.
+// The call is proxied via tracer.baggageSetter to allow policies to be applied
+// before allowing to set/replace baggage keys.
+// The setter eventually stores a new SpanContext with extended baggage:
+//
+// span.context = span.context.WithBaggageItem(key, value)
+//
+// See SpanContext.WithBaggageItem() for explanation why it's done this way.
+func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
+ s.Lock()
+ defer s.Unlock()
+ s.tracer.setBaggage(s, key, value)
+ return s
+}
+
+// BaggageItem implements BaggageItem() of opentracing.Span
+func (s *Span) BaggageItem(key string) string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.baggage[key]
+}
+
+// Finish implements opentracing.Span API
+// After finishing, the Span object is returned to the allocator unless the reporter retains it,
+// so the Span should no longer be used afterwards because it will no longer be valid.
+func (s *Span) Finish() {
+ s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// FinishWithOptions implements opentracing.Span API
+func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
+ if options.FinishTime.IsZero() {
+ options.FinishTime = s.tracer.timeNow()
+ }
+ s.observer.OnFinish(options)
+ s.Lock()
+ s.duration = options.FinishTime.Sub(s.startTime)
+ ctx := s.context
+ s.Unlock()
+ if !ctx.isSamplingFinalized() {
+ decision := s.tracer.sampler.OnFinishSpan(s)
+ s.applySamplingDecision(decision, true)
+ }
+ if ctx.IsSampled() {
+ s.Lock()
+ s.fixLogsIfDropped()
+ if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
+ // Note: bulk logs are not subject to maxLogsPerSpan limit
+ if options.LogRecords != nil {
+ s.logs = append(s.logs, options.LogRecords...)
+ }
+ for _, ld := range options.BulkLogData {
+ s.logs = append(s.logs, ld.ToLogRecord())
+ }
+ }
+ s.Unlock()
+ }
+ // call reportSpan even for non-sampled traces, to return span to the pool
+ // and update metrics counter
+ s.tracer.reportSpan(s)
+}
+
+// Context implements opentracing.Span API
+func (s *Span) Context() opentracing.SpanContext {
+ s.Lock()
+ defer s.Unlock()
+ return s.context
+}
+
+// Tracer implements opentracing.Span API
+func (s *Span) Tracer() opentracing.Tracer {
+ return s.tracer
+}
+
+func (s *Span) String() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.context.String()
+}
+
+// OperationName allows retrieving current operation name.
+func (s *Span) OperationName() string {
+ s.RLock()
+ defer s.RUnlock()
+ return s.operationName
+}
+
+// Retain increments the reference counter to extend the lifetime of the object.
+func (s *Span) Retain() *Span {
+ atomic.AddInt32(&s.referenceCounter, 1)
+ return s
+}
+
+// Release decrements the reference counter and returns the span to the
+// allocator when the counter drops below zero.
+func (s *Span) Release() {
+ if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
+ s.tracer.spanAllocator.Put(s)
+ }
+}
+
+// reset span state and release unused data
+func (s *Span) reset() {
+ s.firstInProcess = false
+ s.context = emptyContext
+ s.operationName = ""
+ s.tracer = nil
+ s.startTime = time.Time{}
+ s.duration = 0
+ s.observer = nil
+ atomic.StoreInt32(&s.referenceCounter, 0)
+
+	// Note: truncate rather than nil the slices so the backing arrays can be reused
+ s.tags = s.tags[:0]
+ s.logs = s.logs[:0]
+ s.numDroppedLogs = 0
+ s.references = s.references[:0]
+}
+
+func (s *Span) serviceName() string {
+ return s.tracer.serviceName
+}
+
+func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+ var ctx SpanContext
+ if lock {
+ ctx = s.SpanContext()
+ } else {
+ ctx = s.context
+ }
+
+ if !decision.Retryable {
+ ctx.samplingState.setFinal()
+ }
+ if decision.Sample {
+ ctx.samplingState.setSampled()
+ if len(decision.Tags) > 0 {
+ if lock {
+ s.Lock()
+ defer s.Unlock()
+ }
+ for _, tag := range decision.Tags {
+ s.appendTagNoLocking(tag.key, tag.value)
+ }
+ }
+ }
+}
+
+// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+// The behavior of setSamplingPriority is surprising:
+//   - if noDebugFlagOnForcedSampling is set,
+//     setSamplingPriority(..., 1) sets only flagSampled;
+//   - if noDebugFlagOnForcedSampling is unset and isDebugAllowed passes,
+//     setSamplingPriority(..., 1) sets both flagSampled and flagDebug;
+//   - however, setSamplingPriority(..., 0) always resets only flagSampled.
+//
+// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can
+// leave flagDebug set.
+func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool {
+ val, ok := value.(uint16)
+ if !ok {
+ return false
+ }
+ if val == 0 {
+ state.unsetSampled()
+ state.setFinal()
+ return true
+ }
+ if tracer.options.noDebugFlagOnForcedSampling {
+ state.setSampled()
+ state.setFinal()
+ return true
+ } else if tracer.isDebugAllowed(operationName) {
+ state.setDebugAndSampled()
+ state.setFinal()
+ return true
+ }
+ return false
+}
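+
+// Example (illustrative): forcing the sampling decision from application
+// code via the standard OpenTracing tag; the value must be a uint16:
+//
+//	ext.SamplingPriority.Set(span, 1) // force sampling (plus debug, if allowed)
+//	ext.SamplingPriority.Set(span, 0) // drop the trace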
+
+// EnableFirehose enables the firehose flag on the span context
+func EnableFirehose(s *Span) {
+ s.Lock()
+ defer s.Unlock()
+ s.context.samplingState.setFirehose()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
new file mode 100644
index 0000000..6fe0cd0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2019 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "sync"
+
+// SpanAllocator is an abstraction for managing span allocations.
+type SpanAllocator interface {
+ Get() *Span
+ Put(*Span)
+}
+
+type syncPollSpanAllocator struct {
+ spanPool sync.Pool
+}
+
+func newSyncPollSpanAllocator() SpanAllocator {
+ return &syncPollSpanAllocator{
+ spanPool: sync.Pool{New: func() interface{} {
+ return &Span{}
+ }},
+ }
+}
+
+func (pool *syncPollSpanAllocator) Get() *Span {
+ return pool.spanPool.Get().(*Span)
+}
+
+func (pool *syncPollSpanAllocator) Put(span *Span) {
+ span.reset()
+ pool.spanPool.Put(span)
+}
+
+type simpleSpanAllocator struct{}
+
+func (pool simpleSpanAllocator) Get() *Span {
+ return &Span{}
+}
+
+func (pool simpleSpanAllocator) Put(span *Span) {
+ // @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
+ // since finished spans are not reused, no need to reset them
+ // span.reset()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
new file mode 100644
index 0000000..5b2307b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -0,0 +1,418 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+const (
+ flagSampled = 1
+ flagDebug = 2
+ flagFirehose = 8
+)
+
+var (
+ errEmptyTracerStateString = errors.New("Cannot convert empty string to tracer state")
+ errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+ emptyContext = SpanContext{}
+)
+
+// TraceID represents a unique 128-bit identifier of a trace
+type TraceID struct {
+ High, Low uint64
+}
+
+// SpanID represents a unique 64-bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+ // traceID represents globally unique ID of the trace.
+ // Usually generated as a random number.
+ traceID TraceID
+
+ // spanID represents span ID that must be unique within its trace,
+ // but does not have to be globally unique.
+ spanID SpanID
+
+ // parentID refers to the ID of the parent span.
+ // Should be 0 if the current span is a root span.
+ parentID SpanID
+
+	// Distributed Context baggage. This is a snapshot in time.
+ baggage map[string]string
+
+ // debugID can be set to some correlation ID when the context is being
+ // extracted from a TextMap carrier.
+ //
+ // See JaegerDebugHeader in constants.go
+ debugID string
+
+ // samplingState is shared across all spans
+ samplingState *samplingState
+
+ // remote indicates that span context represents a remote parent
+ remote bool
+}
+
+type samplingState struct {
+ // Span context's state flags that are propagated across processes. Only lower 8 bits are used.
+ // We use an int32 instead of byte to be able to use CAS operations.
+ stateFlags atomic.Int32
+
+ // When state is not final, sampling will be retried on other span write operations,
+ // like SetOperationName / SetTag, and the spans will remain writable.
+ final atomic.Bool
+
+ // localRootSpan stores the SpanID of the first span created in this process for a given trace.
+ localRootSpan SpanID
+
+ // extendedState allows samplers to keep intermediate state.
+ // The keys and values in this map are completely opaque: interface{} -> interface{}.
+ extendedState sync.Map
+}
+
+func (s *samplingState) isLocalRootSpan(id SpanID) bool {
+ return id == s.localRootSpan
+}
+
+func (s *samplingState) setFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old|newFlag)
+ }
+}
+
+func (s *samplingState) unsetFlag(newFlag int32) {
+ swapped := false
+ for !swapped {
+ old := s.stateFlags.Load()
+ swapped = s.stateFlags.CAS(old, old&^newFlag)
+ }
+}
+
+func (s *samplingState) setSampled() {
+ s.setFlag(flagSampled)
+}
+
+func (s *samplingState) unsetSampled() {
+ s.unsetFlag(flagSampled)
+}
+
+func (s *samplingState) setDebugAndSampled() {
+ s.setFlag(flagDebug | flagSampled)
+}
+
+func (s *samplingState) setFirehose() {
+ s.setFlag(flagFirehose)
+}
+
+func (s *samplingState) setFlags(flags byte) {
+ s.stateFlags.Store(int32(flags))
+}
+
+func (s *samplingState) setFinal() {
+ s.final.Store(true)
+}
+
+func (s *samplingState) flags() byte {
+ return byte(s.stateFlags.Load())
+}
+
+func (s *samplingState) isSampled() bool {
+ return s.stateFlags.Load()&flagSampled == flagSampled
+}
+
+func (s *samplingState) isDebug() bool {
+ return s.stateFlags.Load()&flagDebug == flagDebug
+}
+
+func (s *samplingState) isFirehose() bool {
+ return s.stateFlags.Load()&flagFirehose == flagFirehose
+}
+
+func (s *samplingState) isFinal() bool {
+ return s.final.Load()
+}
+
+func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
+ if value, ok := s.extendedState.Load(key); ok {
+ return value
+ }
+ value := initValue()
+ value, _ = s.extendedState.LoadOrStore(key, value)
+ return value
+}
+
+// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+ for k, v := range c.baggage {
+ if !handler(k, v) {
+ break
+ }
+ }
+}
+
+// IsSampled returns whether this trace was chosen for permanent storage
+// by the sampling mechanism of the tracer.
+func (c SpanContext) IsSampled() bool {
+ return c.samplingState.isSampled()
+}
+
+// IsDebug indicates whether sampling was explicitly requested by the service.
+func (c SpanContext) IsDebug() bool {
+ return c.samplingState.isDebug()
+}
+
+// IsSamplingFinalized indicates whether the sampling decision has been finalized.
+func (c SpanContext) IsSamplingFinalized() bool {
+ return c.samplingState.isFinal()
+}
+
+// IsFirehose indicates whether the firehose flag was set
+func (c SpanContext) IsFirehose() bool {
+ return c.samplingState.isFirehose()
+}
+
+// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
+// it is initialized via the initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
+func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
+ return c.samplingState.extendedStateForKey(key, initValue)
+}
+
+// IsValid indicates whether this context actually represents a valid trace.
+func (c SpanContext) IsValid() bool {
+ return c.traceID.IsValid() && c.spanID != 0
+}
+
+// SetFirehose enables firehose mode for this trace.
+func (c SpanContext) SetFirehose() {
+ c.samplingState.setFirehose()
+}
+
+func (c SpanContext) String() string {
+ var flags int32
+ if c.samplingState != nil {
+ flags = c.samplingState.stateFlags.Load()
+ }
+ if c.traceID.High == 0 {
+ return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
+ }
+ return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
+}
+
+// ContextFromString reconstructs the Context encoded in a string
+func ContextFromString(value string) (SpanContext, error) {
+ var context SpanContext
+ if value == "" {
+ return emptyContext, errEmptyTracerStateString
+ }
+ parts := strings.Split(value, ":")
+ if len(parts) != 4 {
+ return emptyContext, errMalformedTracerStateString
+ }
+ var err error
+ if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
+ return emptyContext, err
+ }
+ if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
+ return emptyContext, err
+ }
+ if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
+ return emptyContext, err
+ }
+ flags, err := strconv.ParseUint(parts[3], 10, 8)
+ if err != nil {
+ return emptyContext, err
+ }
+ context.samplingState = &samplingState{}
+ context.samplingState.setFlags(byte(flags))
+ return context, nil
+}
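+// Illustrative usage, assuming the wire format produced by SpanContext.String
+// above, i.e. traceID:spanID:parentID:flags:
+//
+//   ctx, err := ContextFromString("deadbeef:1:0:1")
+//   // ctx.IsSampled() == true, since flags has the sampled bit set.
+//
+// Note that String renders flags in hex while this parser uses base 10, so
+// round trips are only exact for flag values below 10, which covers the
+// common flag combinations.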
+
+// TraceID returns the trace ID of this span context
+func (c SpanContext) TraceID() TraceID {
+ return c.traceID
+}
+
+// SpanID returns the span ID of this span context
+func (c SpanContext) SpanID() SpanID {
+ return c.spanID
+}
+
+// ParentID returns the parent span ID of this span context
+func (c SpanContext) ParentID() SpanID {
+ return c.parentID
+}
+
+// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
+func (c SpanContext) Flags() byte {
+ return c.samplingState.flags()
+}
+
+// isWriteable returns whether the span can still be written to, i.e. it is
+// sampled or the sampling decision has not been finalized.
+func (c SpanContext) isWriteable() bool {
+ state := c.samplingState
+ return !state.isFinal() || state.isSampled()
+}
+
+func (c SpanContext) isSamplingFinalized() bool {
+ return c.samplingState.isFinal()
+}
+
+// NewSpanContext creates a new instance of SpanContext
+func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
+ samplingState := &samplingState{}
+ if sampled {
+ samplingState.setSampled()
+ }
+
+ return SpanContext{
+ traceID: traceID,
+ spanID: spanID,
+ parentID: parentID,
+ samplingState: samplingState,
+ baggage: baggage}
+}
+
+// CopyFrom copies data from ctx into this context, including span identity and baggage.
+// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
+func (c *SpanContext) CopyFrom(ctx *SpanContext) {
+ c.traceID = ctx.traceID
+ c.spanID = ctx.spanID
+ c.parentID = ctx.parentID
+ c.samplingState = ctx.samplingState
+ if l := len(ctx.baggage); l > 0 {
+ c.baggage = make(map[string]string, l)
+ for k, v := range ctx.baggage {
+ c.baggage[k] = v
+ }
+ } else {
+ c.baggage = nil
+ }
+}
+
+// WithBaggageItem creates a new context with an extra baggage item.
+// The item is deleted if a blank value is provided.
+//
+// The SpanContext is designed to be immutable and passed by value. As such,
+// it cannot contain any locks, and should only hold immutable data, including baggage.
+// Another reason baggage is immutable is that when the span context is passed
+// as a parent when starting a new span, the new span's baggage cannot affect the
+// parent span's baggage. So the child span either needs to take a copy of the parent
+// baggage (which is expensive and unnecessary, since baggage rarely changes over the
+// life span of a trace), or it needs to do a copy-on-write, which is the approach taken here.
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
+ var newBaggage map[string]string
+ // unset baggage item
+ if value == "" {
+ if _, ok := c.baggage[key]; !ok {
+ return c
+ }
+ newBaggage = make(map[string]string, len(c.baggage))
+ for k, v := range c.baggage {
+ newBaggage[k] = v
+ }
+ delete(newBaggage, key)
+ return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+ }
+ if c.baggage == nil {
+ newBaggage = map[string]string{key: value}
+ } else {
+ newBaggage = make(map[string]string, len(c.baggage)+1)
+ for k, v := range c.baggage {
+ newBaggage[k] = v
+ }
+ newBaggage[key] = value
+ }
+ // Use positional parameters so the compiler will help catch new fields.
+ return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+}
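+// Illustrative usage of the copy-on-write behavior described above; the
+// values are hypothetical:
+//
+//   parent := NewSpanContext(TraceID{Low: 1}, 2, 0, true, nil)
+//   child := parent.WithBaggageItem("user", "alice")
+//   // parent still carries no baggage; child carries {"user": "alice"}.
+//   child2 := child.WithBaggageItem("user", "")
+//   // child2 drops the item again; child is untouched.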
+
+// isDebugIDContainerOnly returns true when the instance of the context is only
+// used to return the debug/correlation ID from the extract() method. This happens
+// when the "jaeger-debug-id" header is passed in the carrier to the extract()
+// method, but the request otherwise has no span context in it.
+// Previously this would have returned opentracing.ErrSpanContextNotFound from the
+// extract method, but now it returns a dummy context with only debugID filled in.
+//
+// See JaegerDebugHeader in constants.go
+// See TextMapPropagator#Extract
+func (c *SpanContext) isDebugIDContainerOnly() bool {
+ return !c.traceID.IsValid() && c.debugID != ""
+}
+
+// ------- TraceID -------
+
+func (t TraceID) String() string {
+ if t.High == 0 {
+ return fmt.Sprintf("%016x", t.Low)
+ }
+ return fmt.Sprintf("%016x%016x", t.High, t.Low)
+}
+
+// TraceIDFromString creates a TraceID from a hexadecimal string
+func TraceIDFromString(s string) (TraceID, error) {
+ var hi, lo uint64
+ var err error
+ if len(s) > 32 {
+ return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
+ } else if len(s) > 16 {
+ hiLen := len(s) - 16
+ if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ } else {
+ if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
+ return TraceID{}, err
+ }
+ }
+ return TraceID{High: hi, Low: lo}, nil
+}
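+// Illustrative: an ID longer than 16 hex digits is split into the High and
+// Low words:
+//
+//   id, _ := TraceIDFromString("463ac35c9f6413ad48485a3953bb6124")
+//   // id.High == 0x463ac35c9f6413ad, id.Low == 0x48485a3953bb6124
+//   // id.String() == "463ac35c9f6413ad48485a3953bb6124"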
+
+// IsValid checks if the trace ID is valid, i.e. not zero.
+func (t TraceID) IsValid() bool {
+ return t.High != 0 || t.Low != 0
+}
+
+// ------- SpanID -------
+
+func (s SpanID) String() string {
+ return fmt.Sprintf("%016x", uint64(s))
+}
+
+// SpanIDFromString creates a SpanID from a hexadecimal string
+func SpanIDFromString(s string) (SpanID, error) {
+ if len(s) > 16 {
+ return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
+ }
+ id, err := strconv.ParseUint(s, 16, 64)
+ if err != nil {
+ return SpanID(0), err
+ }
+ return SpanID(id), nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
new file mode 100644
index 0000000..54cd3b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
new file mode 100644
index 0000000..a0df507
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
@@ -0,0 +1,28 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
new file mode 100644
index 0000000..6472e84
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -0,0 +1,396 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+type Agent interface {
+ // Parameters:
+ // - Spans
+ EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
+ // Parameters:
+ // - Batch
+ EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
+}
+
+type AgentClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+ return &AgentClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+ return &AgentClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewAgentClient(c thrift.TClient) *AgentClient {
+ return &AgentClient{
+ c: c,
+ }
+}
+
+func (p *AgentClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - Spans
+func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
+ var _args0 AgentEmitZipkinBatchArgs
+ _args0.Spans = spans
+ p.SetLastResponseMeta_(thrift.ResponseMeta{})
+ if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
+ return err
+ }
+ return nil
+}
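+// Note: emitZipkinBatch and emitBatch are oneway Thrift methods. Passing a
+// nil result struct to Call above makes the client write the message without
+// reading a response, so server-side failures are never surfaced to the caller.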
+
+// Parameters:
+// - Batch
+func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
+ var _args1 AgentEmitBatchArgs
+ _args1.Batch = batch
+ p.SetLastResponseMeta_(thrift.ResponseMeta{})
+ if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
+ return err
+ }
+ return nil
+}
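+// Minimal wiring sketch, not part of the generated code; it assumes the
+// binary protocol from the vendored thrift package:
+//
+//   trans := thrift.NewTMemoryBuffer()
+//   proto := thrift.NewTBinaryProtocolFactoryDefault().GetProtocol(trans)
+//   client := NewAgentClientProtocol(trans, proto, proto)
+//   _ = client.EmitBatch(context.Background(), &jaeger.Batch{})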
+
+type AgentProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+  self2 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+  self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler: handler}
+  self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler}
+  return self2
+}
+
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x3.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x3
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitZipkinBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ _ = tickerCancel
+
+ if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
+ tickerCancel()
+ return true, thrift.WrapTException(err2)
+ }
+ tickerCancel()
+ return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+ handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := AgentEmitBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ _ = tickerCancel
+
+ if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
+ tickerCancel()
+ return true, thrift.WrapTException(err2)
+ }
+ tickerCancel()
+ return true, nil
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type AgentEmitZipkinBatchArgs struct {
+ Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+ return &AgentEmitZipkinBatchArgs{}
+}
+
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+ return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*zipkincore.Span, 0, size)
+ p.Spans = tSlice
+ for i := 0; i < size; i ++ {
+ _elem4 := &zipkincore.Span{}
+ if err := _elem4.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Spans = append(p.Spans, _elem4)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+ return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Batch
+type AgentEmitBatchArgs struct {
+ Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+ return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+ if !p.IsSetBatch() {
+ return AgentEmitBatchArgs_Batch_DEFAULT
+ }
+return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+ return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Batch = &jaeger.Batch{}
+ if err := p.Batch.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+ }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
+ if err := p.Batch.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
+ return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
new file mode 100644
index 0000000..712b6a9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
new file mode 100644
index 0000000..39b5a7e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
new file mode 100644
index 0000000..e4d89d5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
@@ -0,0 +1,565 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+// Attributes:
+// - BaggageKey
+// - MaxValueLength
+type BaggageRestriction struct {
+ BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"`
+ MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"`
+}
+
+func NewBaggageRestriction() *BaggageRestriction {
+ return &BaggageRestriction{}
+}
+
+
+func (p *BaggageRestriction) GetBaggageKey() string {
+ return p.BaggageKey
+}
+
+func (p *BaggageRestriction) GetMaxValueLength() int32 {
+ return p.MaxValueLength
+}
+func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetBaggageKey bool = false;
+ var issetMaxValueLength bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetBaggageKey = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetMaxValueLength = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetBaggageKey{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"));
+ }
+ if !issetMaxValueLength{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"));
+ }
+ return nil
+}
+
+func (p *BaggageRestriction) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.BaggageKey = v
+}
+ return nil
+}
+
+func (p *BaggageRestriction) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.MaxValueLength = v
+}
+ return nil
+}
+
+func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) }
+ return err
+}
+
+func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) }
+ return err
+}
+
+func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.BaggageKey != other.BaggageKey { return false }
+ if p.MaxValueLength != other.MaxValueLength { return false }
+ return true
+}
+
+func (p *BaggageRestriction) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestriction(%+v)", *p)
+}
+
+type BaggageRestrictionManager interface {
+ // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+ // Usually, baggageRestrictions apply to all services; however, there may be situations
+ // where a baggageKey might only be allowed to be set by a specific service.
+ //
+ // Parameters:
+ // - ServiceName
+ GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error)
+}
+
+type BaggageRestrictionManagerClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient {
+ return &BaggageRestrictionManagerClient{
+ c: c,
+ }
+}
+
+func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+// Usually, baggageRestrictions apply to all services; however, there may be situations
+// where a baggageKey might only be allowed to be set by a specific service.
+//
+// Parameters:
+// - ServiceName
+func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) {
+ var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs
+ _args0.ServiceName = serviceName
+ var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult
+ var _meta1 thrift.ResponseMeta
+ _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2)
+ p.SetLastResponseMeta_(_meta1)
+ if _err != nil {
+ return
+ }
+ return _result2.GetSuccess(), nil
+}
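+// Illustrative call sequence; someTClient is hypothetical. Unlike the oneway
+// Agent methods, this call passes a result struct and therefore blocks until
+// the server replies:
+//
+//   client := NewBaggageRestrictionManagerClient(someTClient)
+//   restrictions, err := client.GetBaggageRestrictions(context.Background(), "my-service")
+//   for _, r := range restrictions {
+//       fmt.Println(r.GetBaggageKey(), r.GetMaxValueLength())
+//   }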
+
+type BaggageRestrictionManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler BaggageRestrictionManager
+}
+
+func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
+  self3 := &BaggageRestrictionManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+  self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler: handler}
+  return self3
+}
+
+func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x4.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x4
+
+}
+
+type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
+ handler BaggageRestrictionManager
+}
+
+func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to do server side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
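+  // Note: the block above links two lifetimes: the handler's ctx is
+  // cancelled if the ticker goroutine sees the transport close, and
+  // tickerCancel stops the ticker goroutine once the handler returns,
+  // so neither side leaks.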
+
+ result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+ var retval []*BaggageRestriction
+ if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
+ ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+}
+
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.ServiceName = v
+}
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
+ Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
+ return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+}
+
+var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
+ return p.Success
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BaggageRestriction, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i ++ {
+ _elem5 := &BaggageRestriction{}
+ if err := _elem5.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+ }
+ p.Success = append(p.Success, _elem5)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
new file mode 100644
index 0000000..fe45a9f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
new file mode 100644
index 0000000..b6ce855
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
new file mode 100644
index 0000000..d55cca0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
@@ -0,0 +1,2698 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+ "bytes"
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type TagType int64
+const (
+ TagType_STRING TagType = 0
+ TagType_DOUBLE TagType = 1
+ TagType_BOOL TagType = 2
+ TagType_LONG TagType = 3
+ TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+ switch p {
+ case TagType_STRING: return "STRING"
+ case TagType_DOUBLE: return "DOUBLE"
+ case TagType_BOOL: return "BOOL"
+ case TagType_LONG: return "LONG"
+ case TagType_BINARY: return "BINARY"
+ }
+ return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+ switch s {
+ case "STRING": return TagType_STRING, nil
+ case "DOUBLE": return TagType_DOUBLE, nil
+ case "BOOL": return TagType_BOOL, nil
+ case "LONG": return TagType_LONG, nil
+ case "BINARY": return TagType_BINARY, nil
+ }
+ return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+q, err := TagTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *TagType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = TagType(v)
+return nil
+}
+
+func (p * TagType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+return int64(*p), nil
+}
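+
+// Illustrative: because TagType implements encoding.TextMarshaler and
+// TextUnmarshaler above, it round-trips through JSON as its symbolic name
+// rather than as a number:
+//
+//   b, _ := json.Marshal(TagType_BOOL) // `"BOOL"`
+//   var t TagType
+//   _ = t.UnmarshalText([]byte("LONG")) // t == TagType_LONG
+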
+type SpanRefType int64
+const (
+ SpanRefType_CHILD_OF SpanRefType = 0
+ SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+ switch p {
+ case SpanRefType_CHILD_OF: return "CHILD_OF"
+ case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM"
+ }
+ return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+ switch s {
+ case "CHILD_OF": return SpanRefType_CHILD_OF, nil
+ case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil
+ }
+ return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+q, err := SpanRefTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SpanRefType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SpanRefType(v)
+return nil
+}
+
+func (p * SpanRefType) Value() (driver.Value, error) {
+ if p == nil {
+ return nil, nil
+ }
+return int64(*p), nil
+}
+// Attributes:
+// - Key
+// - VType
+// - VStr
+// - VDouble
+// - VBool
+// - VLong
+// - VBinary
+type Tag struct {
+ Key string `thrift:"key,1,required" db:"key" json:"key"`
+ VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
+ VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
+ VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
+ VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
+ VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
+ VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+ return &Tag{}
+}
+
+
+func (p *Tag) GetKey() string {
+ return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+ return p.VType
+}
+var Tag_VStr_DEFAULT string
+func (p *Tag) GetVStr() string {
+ if !p.IsSetVStr() {
+ return Tag_VStr_DEFAULT
+ }
+return *p.VStr
+}
+var Tag_VDouble_DEFAULT float64
+func (p *Tag) GetVDouble() float64 {
+ if !p.IsSetVDouble() {
+ return Tag_VDouble_DEFAULT
+ }
+return *p.VDouble
+}
+var Tag_VBool_DEFAULT bool
+func (p *Tag) GetVBool() bool {
+ if !p.IsSetVBool() {
+ return Tag_VBool_DEFAULT
+ }
+return *p.VBool
+}
+var Tag_VLong_DEFAULT int64
+func (p *Tag) GetVLong() int64 {
+ if !p.IsSetVLong() {
+ return Tag_VLong_DEFAULT
+ }
+return *p.VLong
+}
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+ return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+ return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+ return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+ return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+ return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+ return p.VBinary != nil
+}
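+// Illustrative: exactly one value field should be set, selected by VType;
+// the optional fields are pointers so that unset values are omitted from
+// the wire encoding:
+//
+//   v := "example"
+//   tag := &Tag{Key: "component", VType: TagType_STRING, VStr: &v}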
+
+func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetKey bool = false;
+ var issetVType bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetKey = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetVType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField7(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetKey{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
+ }
+ if !issetVType{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"));
+ }
+ return nil
+}
+
+func (p *Tag) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Key = v
+}
+ return nil
+}
+
+func (p *Tag) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ temp := TagType(v)
+ p.VType = temp
+}
+ return nil
+}
+
+func (p *Tag) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.VStr = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadDouble(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.VDouble = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.VBool = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 6: ", err)
+} else {
+ p.VLong = &v
+}
+ return nil
+}
+
+func (p *Tag) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 7: ", err)
+} else {
+ p.VBinary = v
+}
+ return nil
+}
+
+func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+ return err
+}
+
+func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) }
+ return err
+}
+
+func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVStr() {
+ if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) }
+ if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVDouble() {
+ if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBool() {
+ if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVLong() {
+ if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) }
+ }
+ return err
+}
+
+func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetVBinary() {
+ if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) }
+ }
+ return err
+}
+
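+// Equals reports whether p and other represent the same tag: the required
+// scalars (Key, VType) compare directly, optional pointer fields match
+// when both are nil or both point to equal values, and VBinary is
+// compared byte-wise.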
+func (p *Tag) Equals(other *Tag) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Key != other.Key { return false }
+ if p.VType != other.VType { return false }
+ if p.VStr != other.VStr {
+ if p.VStr == nil || other.VStr == nil {
+ return false
+ }
+ if (*p.VStr) != (*other.VStr) { return false }
+ }
+ if p.VDouble != other.VDouble {
+ if p.VDouble == nil || other.VDouble == nil {
+ return false
+ }
+ if (*p.VDouble) != (*other.VDouble) { return false }
+ }
+ if p.VBool != other.VBool {
+ if p.VBool == nil || other.VBool == nil {
+ return false
+ }
+ if (*p.VBool) != (*other.VBool) { return false }
+ }
+ if p.VLong != other.VLong {
+ if p.VLong == nil || other.VLong == nil {
+ return false
+ }
+ if (*p.VLong) != (*other.VLong) { return false }
+ }
+  if !bytes.Equal(p.VBinary, other.VBinary) { return false }
+ return true
+}
+
+func (p *Tag) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Tag(%+v)", *p)
+}
+
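+// Log is a timestamped collection of Tag fields attached to a span. A
+// minimal construction sketch (the timestamp value is illustrative; in
+// Jaeger's data model it is expected to be microseconds since epoch):
+//
+//	l := &Log{Timestamp: 1700000000000000, Fields: []*Tag{tag}}
+//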
+// Attributes:
+// - Timestamp
+// - Fields
+type Log struct {
+ Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
+ Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
+}
+
+func NewLog() *Log {
+ return &Log{}
+}
+
+
+func (p *Log) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+ return p.Fields
+}
+func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetTimestamp := false
+  issetFields := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetTimestamp = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetFields = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetTimestamp {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"))
+  }
+  if !issetFields {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"))
+  }
+ return nil
+}
+
+func (p *Log) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Timestamp = v
+  }
+  return nil
+}
+
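+// ReadField2 decodes the fields list: it reads the list header, allocates
+// a slice with the announced capacity, and delegates each element to
+// Tag.Read before consuming the list end marker.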
+func (p *Log) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Fields = tSlice
+  for i := 0; i < size; i++ {
+ _elem0 := &Tag{}
+ if err := _elem0.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Fields = append(p.Fields, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+ return err
+}
+
+func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Fields {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) }
+ return err
+}
+
+func (p *Log) Equals(other *Log) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Timestamp != other.Timestamp { return false }
+ if len(p.Fields) != len(other.Fields) { return false }
+ for i, _tgt := range p.Fields {
+ _src1 := other.Fields[i]
+ if !_tgt.Equals(_src1) { return false }
+ }
+ return true
+}
+
+func (p *Log) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Log(%+v)", *p)
+}
+
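+// SpanRef links a span to another span, either in the same trace or in a
+// different one. A construction sketch (SpanRefType_CHILD_OF is assumed
+// to be one of the SpanRefType constants generated earlier in this file):
+//
+//	ref := &SpanRef{RefType: SpanRefType_CHILD_OF, TraceIdLow: low, TraceIdHigh: high, SpanId: id}
+//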
+// Attributes:
+// - RefType
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+type SpanRef struct {
+ RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
+ TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+ return &SpanRef{}
+}
+
+
+func (p *SpanRef) GetRefType() SpanRefType {
+ return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+ return p.SpanId
+}
+func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetRefType := false
+  issetTraceIdLow := false
+  issetTraceIdHigh := false
+  issetSpanId := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetRefType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetRefType {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"))
+  }
+  if !issetTraceIdLow {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+  }
+  if !issetTraceIdHigh {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+  }
+  if !issetSpanId {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+  }
+ return nil
+}
+
+func (p *SpanRef) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.RefType = SpanRefType(v)
+  }
+  return nil
+}
+
+func (p *SpanRef) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.TraceIdLow = v
+  }
+  return nil
+}
+
+func (p *SpanRef) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.TraceIdHigh = v
+  }
+  return nil
+}
+
+func (p *SpanRef) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.SpanId = v
+  }
+  return nil
+}
+
+func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) }
+ return err
+}
+
+func (p *SpanRef) Equals(other *SpanRef) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.RefType != other.RefType { return false }
+ if p.TraceIdLow != other.TraceIdLow { return false }
+ if p.TraceIdHigh != other.TraceIdHigh { return false }
+ if p.SpanId != other.SpanId { return false }
+ return true
+}
+
+func (p *SpanRef) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
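+// Span models one operation in a trace. The 128-bit trace identifier is
+// split across traceIdLow and traceIdHigh; startTime and duration carry
+// the span's timing, and the references, tags and logs lists are optional
+// (written only when non-nil, matching their ,omitempty JSON tags).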
+// Attributes:
+// - TraceIdLow
+// - TraceIdHigh
+// - SpanId
+// - ParentSpanId
+// - OperationName
+// - References
+// - Flags
+// - StartTime
+// - Duration
+// - Tags
+// - Logs
+type Span struct {
+ TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
+ TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
+ SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
+ ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
+ OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
+ References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
+ Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
+ StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
+ Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
+ Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
+ Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
+
+func (p *Span) GetTraceIdLow() int64 {
+ return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+ return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+ return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+ return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+ return p.OperationName
+}
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+ return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+ return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+ return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+ return p.Duration
+}
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+ return p.Tags
+}
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+ return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+ return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+ return p.Logs != nil
+}
+
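+// Read consumes fields in any order, skipping unknown or wrongly typed
+// ones, and records isset flags so that a missing required field is
+// reported as an INVALID_DATA protocol exception once the whole struct
+// has been read.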
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetTraceIdLow := false
+  issetTraceIdHigh := false
+  issetSpanId := false
+  issetParentSpanId := false
+  issetOperationName := false
+  issetFlags := false
+  issetStartTime := false
+  issetDuration := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdLow = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTraceIdHigh = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ issetParentSpanId = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ issetOperationName = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 7:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField7(ctx, iprot); err != nil {
+ return err
+ }
+ issetFlags = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField8(ctx, iprot); err != nil {
+ return err
+ }
+ issetStartTime = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField9(ctx, iprot); err != nil {
+ return err
+ }
+ issetDuration = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField10(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField11(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetTraceIdLow {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"))
+  }
+  if !issetTraceIdHigh {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"))
+  }
+  if !issetSpanId {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"))
+  }
+  if !issetParentSpanId {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"))
+  }
+  if !issetOperationName {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"))
+  }
+  if !issetFlags {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"))
+  }
+  if !issetStartTime {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"))
+  }
+  if !issetDuration {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"))
+  }
+ return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.TraceIdLow = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.TraceIdHigh = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.SpanId = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.ParentSpanId = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 5: ", err)
+  } else {
+    p.OperationName = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*SpanRef, 0, size)
+ p.References = tSlice
+  for i := 0; i < size; i++ {
+ _elem2 := &SpanRef{}
+ if err := _elem2.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+ }
+ p.References = append(p.References, _elem2)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 7: ", err)
+  } else {
+    p.Flags = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 8: ", err)
+  } else {
+    p.StartTime = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 9: ", err)
+  } else {
+    p.Duration = v
+  }
+  return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+  for i := 0; i < size; i++ {
+ _elem3 := &Tag{}
+ if err := _elem3.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+ }
+ p.Tags = append(p.Tags, _elem3)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Log, 0, size)
+ p.Logs = tSlice
+  for i := 0; i < size; i++ {
+ _elem4 := &Log{}
+ if err := _elem4.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+ }
+ p.Logs = append(p.Logs, _elem4)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField7(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ if err := p.writeField9(ctx, oprot); err != nil { return err }
+ if err := p.writeField10(ctx, oprot); err != nil { return err }
+ if err := p.writeField11(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) }
+ return err
+}
+
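+// writeField6 (references), writeField10 (tags) and writeField11 (logs)
+// emit their lists only when the corresponding slice is non-nil, so an
+// absent optional list takes no space in the encoded struct.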
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetReferences() {
+ if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.References {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetLogs() {
+ if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Logs {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.TraceIdLow != other.TraceIdLow { return false }
+ if p.TraceIdHigh != other.TraceIdHigh { return false }
+ if p.SpanId != other.SpanId { return false }
+ if p.ParentSpanId != other.ParentSpanId { return false }
+ if p.OperationName != other.OperationName { return false }
+ if len(p.References) != len(other.References) { return false }
+ for i, _tgt := range p.References {
+ _src5 := other.References[i]
+ if !_tgt.Equals(_src5) { return false }
+ }
+ if p.Flags != other.Flags { return false }
+ if p.StartTime != other.StartTime { return false }
+ if p.Duration != other.Duration { return false }
+ if len(p.Tags) != len(other.Tags) { return false }
+ for i, _tgt := range p.Tags {
+ _src6 := other.Tags[i]
+ if !_tgt.Equals(_src6) { return false }
+ }
+ if len(p.Logs) != len(other.Logs) { return false }
+ for i, _tgt := range p.Logs {
+ _src7 := other.Logs[i]
+ if !_tgt.Equals(_src7) { return false }
+ }
+ return true
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
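+// Process identifies the service that emitted a batch of spans. A
+// construction sketch (the service name and tag are illustrative values):
+//
+//	proc := &Process{ServiceName: "my-service", Tags: []*Tag{hostTag}}
+//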
+// Attributes:
+// - ServiceName
+// - Tags
+type Process struct {
+ ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
+ Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+ return &Process{}
+}
+
+
+func (p *Process) GetServiceName() string {
+ return p.ServiceName
+}
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+ return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+ return p.Tags != nil
+}
+
+func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetServiceName := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetServiceName = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetServiceName {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"))
+  }
+ return nil
+}
+
+func (p *Process) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.ServiceName = v
+  }
+  return nil
+}
+
+func (p *Process) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Tag, 0, size)
+ p.Tags = tSlice
+  for i := 0; i < size; i++ {
+ _elem8 := &Tag{}
+ if err := _elem8.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
+ }
+ p.Tags = append(p.Tags, _elem8)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+ return err
+}
+
+func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTags() {
+ if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Tags {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) }
+ }
+ return err
+}
+
+func (p *Process) Equals(other *Process) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.ServiceName != other.ServiceName { return false }
+ if len(p.Tags) != len(other.Tags) { return false }
+ for i, _tgt := range p.Tags {
+ _src9 := other.Tags[i]
+ if !_tgt.Equals(_src9) { return false }
+ }
+ return true
+}
+
+func (p *Process) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Process(%+v)", *p)
+}
+
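+// ClientStats carries self-reported drop counters from the client; all
+// three fields are required on the wire.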
+// Attributes:
+// - FullQueueDroppedSpans
+// - TooLargeDroppedSpans
+// - FailedToEmitSpans
+type ClientStats struct {
+ FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
+ TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
+ FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+ return &ClientStats{}
+}
+
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+ return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+ return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+ return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetFullQueueDroppedSpans := false
+  issetTooLargeDroppedSpans := false
+  issetFailedToEmitSpans := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetFullQueueDroppedSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetTooLargeDroppedSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetFailedToEmitSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetFullQueueDroppedSpans {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"))
+  }
+  if !issetTooLargeDroppedSpans {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"))
+  }
+  if !issetFailedToEmitSpans {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"))
+  }
+ return nil
+}
+
+func (p *ClientStats) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.FullQueueDroppedSpans = v
+  }
+  return nil
+}
+
+func (p *ClientStats) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.TooLargeDroppedSpans = v
+  }
+  return nil
+}
+
+func (p *ClientStats) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.FailedToEmitSpans = v
+  }
+  return nil
+}
+
+func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) }
+ return err
+}
+
+func (p *ClientStats) Equals(other *ClientStats) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false }
+ if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false }
+ if p.FailedToEmitSpans != other.FailedToEmitSpans { return false }
+ return true
+}
+
+func (p *ClientStats) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
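+// Batch groups the spans emitted by a single Process for one submission.
+// A sketch, reusing proc and spans built as in the examples above:
+//
+//	batch := &Batch{Process: proc, Spans: spans}
+//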
+// Attributes:
+// - Process
+// - Spans
+// - SeqNo
+// - Stats
+type Batch struct {
+ Process *Process `thrift:"process,1,required" db:"process" json:"process"`
+ Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
+ SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
+ Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
+}
+
+func NewBatch() *Batch {
+ return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+func (p *Batch) GetProcess() *Process {
+  if !p.IsSetProcess() {
+    return Batch_Process_DEFAULT
+  }
+  return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+ return p.Spans
+}
+var Batch_SeqNo_DEFAULT int64
+func (p *Batch) GetSeqNo() int64 {
+  if !p.IsSetSeqNo() {
+    return Batch_SeqNo_DEFAULT
+  }
+  return *p.SeqNo
+}
+var Batch_Stats_DEFAULT *ClientStats
+func (p *Batch) GetStats() *ClientStats {
+  if !p.IsSetStats() {
+    return Batch_Stats_DEFAULT
+  }
+  return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+ return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+ return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+ return p.Stats != nil
+}
+
+func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetProcess := false
+  issetSpans := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetProcess = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetSpans = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetProcess {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"))
+  }
+  if !issetSpans {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"))
+  }
+ return nil
+}
+
+func (p *Batch) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Process = &Process{}
+ if err := p.Process.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+ }
+ return nil
+}
+
+func (p *Batch) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+  for i := 0; i < size; i++ {
+ _elem10 := &Span{}
+ if err := _elem10.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+ }
+ p.Spans = append(p.Spans, _elem10)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Batch) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+    return thrift.PrependError("error reading field 3: ", err)
+  } else {
+    p.SeqNo = &v
+  }
+  return nil
+}
+
+func (p *Batch) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Stats = &ClientStats{}
+ if err := p.Stats.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+ }
+ return nil
+}
+
+func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) }
+ if err := p.Process.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) }
+ return err
+}
+
+func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) }
+ return err
+}
+
+func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSeqNo() {
+ if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) }
+ }
+ return err
+}
+
+func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetStats() {
+ if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) }
+ if err := p.Stats.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) }
+ }
+ return err
+}
+
+func (p *Batch) Equals(other *Batch) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if !p.Process.Equals(other.Process) { return false }
+ if len(p.Spans) != len(other.Spans) { return false }
+ for i, _tgt := range p.Spans {
+ _src11 := other.Spans[i]
+ if !_tgt.Equals(_src11) { return false }
+ }
+ if p.SeqNo != other.SeqNo {
+ if p.SeqNo == nil || other.SeqNo == nil {
+ return false
+ }
+ if (*p.SeqNo) != (*other.SeqNo) { return false }
+ }
+ if !p.Stats.Equals(other.Stats) { return false }
+ return true
+}
+
+func (p *Batch) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type BatchSubmitResponse struct {
+ Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+ return &BatchSubmitResponse{}
+}
+
+
+func (p *BatchSubmitResponse) GetOk() bool {
+ return p.Ok
+}
+func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+  issetOk := false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+    if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetOk {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"))
+  }
+ return nil
+}
+
+func (p *BatchSubmitResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Ok = v
+  }
+  return nil
+}
+
+func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+ return err
+}
+
+func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ok != other.Ok { return false }
+ return true
+}
+
+func (p *BatchSubmitResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
+
+type Collector interface {
+ // Parameters:
+ // - Batches
+ SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
+}
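+
+// A minimal sketch (illustrative only, not generator output) of a handler
+// satisfying the Collector interface; the `ackCollector` type and its
+// always-OK behavior are assumptions made up for this example:
+//
+//    type ackCollector struct{}
+//
+//    func (ackCollector) SubmitBatches(ctx context.Context, batches []*Batch) ([]*BatchSubmitResponse, error) {
+//        resps := make([]*BatchSubmitResponse, 0, len(batches))
+//        for range batches {
+//            resps = append(resps, &BatchSubmitResponse{Ok: true}) // acknowledge every batch
+//        }
+//        return resps, nil
+//    }
+//
+// Such a handler can then be served through NewCollectorProcessor below.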
+
+type CollectorClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
+ return &CollectorClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
+ return &CollectorClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewCollectorClient(c thrift.TClient) *CollectorClient {
+ return &CollectorClient{
+ c: c,
+ }
+}
+
+func (p *CollectorClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - Batches
+func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
+ var _args12 CollectorSubmitBatchesArgs
+ _args12.Batches = batches
+ var _result14 CollectorSubmitBatchesResult
+ var _meta13 thrift.ResponseMeta
+ _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
+ p.SetLastResponseMeta_(_meta13)
+ if _err != nil {
+ return
+ }
+ return _result14.GetSuccess(), nil
+}
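+
+// A usage sketch (illustrative only, not generator output): given an already
+// configured thrift.TClient (the variable `c` and the prepared `batch` are
+// assumptions here), submitting batches looks like:
+//
+//    collector := NewCollectorClient(c)
+//    resps, err := collector.SubmitBatches(context.Background(), []*Batch{batch})
+//    if err != nil {
+//        return err // transport or protocol failure
+//    }
+//    for _, r := range resps {
+//        if !r.GetOk() {
+//            // the collector rejected this batch
+//        }
+//    }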
+
+type CollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler Collector
+}
+
+func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewCollectorProcessor(handler Collector) *CollectorProcessor {
+  self15 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+  self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler}
+  return self15
+}
+
+func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x16.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x16
+
+}
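+
+// A dispatch sketch (illustrative only, not generator output): Process reads
+// the message name and routes it through the processor map, so serving a
+// handler `h` (assumed to satisfy Collector) over live protocols `iprot` and
+// `oprot` (also assumed) reduces to:
+//
+//    proc := NewCollectorProcessor(h)
+//    for {
+//        ok, err := proc.Process(ctx, iprot, oprot)
+//        if !ok || err != nil {
+//            break
+//        }
+//    }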
+
+type collectorProcessorSubmitBatches struct {
+ handler Collector
+}
+
+func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := CollectorSubmitBatchesArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to perform a server-side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
+
+ result := CollectorSubmitBatchesResult{}
+ var retval []*BatchSubmitResponse
+ if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Batches
+type CollectorSubmitBatchesArgs struct {
+ Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
+}
+
+func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
+ return &CollectorSubmitBatchesArgs{}
+}
+
+
+func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
+ return p.Batches
+}
+func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Batch, 0, size)
+ p.Batches = tSlice
+ for i := 0; i < size; i++ {
+ _elem17 := &Batch{}
+ if err := _elem17.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
+ }
+ p.Batches = append(p.Batches, _elem17)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Batches {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) }
+ return err
+}
+
+func (p *CollectorSubmitBatchesArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type CollectorSubmitBatchesResult struct {
+ Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
+ return &CollectorSubmitBatchesResult{}
+}
+
+var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
+
+func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
+ return p.Success
+}
+func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BatchSubmitResponse, 0, size)
+ p.Success = tSlice
+ for i := 0; i < size; i++ {
+ _elem18 := &BatchSubmitResponse{}
+ if err := _elem18.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
+ }
+ p.Success = append(p.Success, _elem18)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *CollectorSubmitBatchesResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
new file mode 100644
index 0000000..015ad4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+var GoUnusedProtection__ int
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
new file mode 100644
index 0000000..5cc7628
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
new file mode 100644
index 0000000..3bffa5b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
@@ -0,0 +1,1323 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+import (
+ "bytes"
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type SamplingStrategyType int64
+const (
+ SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+ SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+ switch p {
+ case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC"
+ case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING"
+ }
+ return "<UNSET>"
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+ switch s {
+ case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil
+ case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil
+ }
+ return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+  return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+  q, err := SamplingStrategyTypeFromString(string(text))
+  if err != nil {
+    return err
+  }
+  *p = q
+  return nil
+}
+
+func (p *SamplingStrategyType) Scan(value interface{}) error {
+  v, ok := value.(int64)
+  if !ok {
+    return errors.New("Scan value is not int64")
+  }
+  *p = SamplingStrategyType(v)
+  return nil
+}
+
+func (p *SamplingStrategyType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+  return int64(*p), nil
+}
+// Attributes:
+// - SamplingRate
+type ProbabilisticSamplingStrategy struct {
+ SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+ return &ProbabilisticSamplingStrategy{}
+}
+
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+ return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetSamplingRate bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetSamplingRate = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetSamplingRate {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"))
+  }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.SamplingRate = v
+  }
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) }
+ return err
+}
+
+func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.SamplingRate != other.SamplingRate { return false }
+ return true
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+ MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+ return &RateLimitingSamplingStrategy{}
+}
+
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+ return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetMaxTracesPerSecond bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I16 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetMaxTracesPerSecond = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetMaxTracesPerSecond {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"))
+  }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.MaxTracesPerSecond = v
+  }
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) }
+ if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) }
+ return err
+}
+
+func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false }
+ return true
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - Operation
+// - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+ Operation string `thrift:"operation,1,required" db:"operation" json:"operation"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+ return &OperationSamplingStrategy{}
+}
+
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+  return p.Operation
+}
+
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+  }
+  return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOperation bool = false
+ var issetProbabilisticSampling bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetOperation = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetProbabilisticSampling = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetOperation {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"))
+  }
+  if !issetProbabilisticSampling {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"))
+  }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.Operation = v
+  }
+  return nil
+}
+
+func (p *OperationSamplingStrategy) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Operation)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) }
+ return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+ if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+ return err
+}
+
+func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Operation != other.Operation { return false }
+ if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+ return true
+}
+
+func (p *OperationSamplingStrategy) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+// - DefaultSamplingProbability
+// - DefaultLowerBoundTracesPerSecond
+// - PerOperationStrategies
+// - DefaultUpperBoundTracesPerSecond
+type PerOperationSamplingStrategies struct {
+ DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"`
+ DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"`
+ PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"`
+ DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+ return &PerOperationSamplingStrategies{}
+}
+
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+ return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+ return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+ return p.PerOperationStrategies
+}
+
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+  if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+    return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+  }
+  return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+ return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetDefaultSamplingProbability bool = false
+ var issetDefaultLowerBoundTracesPerSecond bool = false
+ var issetPerOperationStrategies bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetDefaultSamplingProbability = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ issetDefaultLowerBoundTracesPerSecond = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ issetPerOperationStrategies = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.DOUBLE {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetDefaultSamplingProbability {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"))
+  }
+  if !issetDefaultLowerBoundTracesPerSecond {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"))
+  }
+  if !issetPerOperationStrategies {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"))
+  }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    p.DefaultSamplingProbability = v
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+  } else {
+    p.DefaultLowerBoundTracesPerSecond = v
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*OperationSamplingStrategy, 0, size)
+ p.PerOperationStrategies = tSlice
+ for i := 0; i < size; i++ {
+ _elem0 := &OperationSamplingStrategy{}
+ if err := _elem0.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+    return thrift.PrependError("error reading field 4: ", err)
+  } else {
+    p.DefaultUpperBoundTracesPerSecond = &v
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.PerOperationStrategies {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDefaultUpperBoundTracesPerSecond() {
+ if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+ if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+ }
+ return err
+}
+
+func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false }
+ if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false }
+ if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false }
+ for i, _tgt := range p.PerOperationStrategies {
+ _src1 := other.PerOperationStrategies[i]
+ if !_tgt.Equals(_src1) { return false }
+ }
+ if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond {
+ if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil {
+ return false
+ }
+ if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false }
+ }
+ return true
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
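+
+// A construction sketch (illustrative only, not generator output): a strategy
+// set with one per-operation override; every literal below is an arbitrary
+// example value:
+//
+//    upper := 10.0
+//    s := &PerOperationSamplingStrategies{
+//        DefaultSamplingProbability:       0.001,
+//        DefaultLowerBoundTracesPerSecond: 0.1,
+//        PerOperationStrategies: []*OperationSamplingStrategy{{
+//            Operation:             "GET /users",
+//            ProbabilisticSampling: &ProbabilisticSamplingStrategy{SamplingRate: 0.01},
+//        }},
+//        DefaultUpperBoundTracesPerSecond: &upper, // optional field, hence the pointer
+//    }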
+
+// Attributes:
+// - StrategyType
+// - ProbabilisticSampling
+// - RateLimitingSampling
+// - OperationSampling
+type SamplingStrategyResponse struct {
+ StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"`
+ ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"`
+ RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"`
+ OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+ return &SamplingStrategyResponse{}
+}
+
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+  return p.StrategyType
+}
+
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+  }
+  return p.ProbabilisticSampling
+}
+
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+  if !p.IsSetRateLimitingSampling() {
+    return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+  }
+  return p.RateLimitingSampling
+}
+
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+  if !p.IsSetOperationSampling() {
+    return SamplingStrategyResponse_OperationSampling_DEFAULT
+  }
+  return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+ return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+ return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+ return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetStrategyType bool = false
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetStrategyType = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+  if !issetStrategyType {
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"))
+  }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+  } else {
+    temp := SamplingStrategyType(v)
+    p.StrategyType = temp
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+ if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+ if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.OperationSampling = &PerOperationSamplingStrategies{}
+ if err := p.OperationSampling.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+ }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetProbabilisticSampling() {
+ if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+ if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetRateLimitingSampling() {
+ if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) }
+ if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetOperationSampling() {
+ if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) }
+ if err := p.OperationSampling.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) }
+ }
+ return err
+}
+
+func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.StrategyType != other.StrategyType { return false }
+ if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+ if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false }
+ if !p.OperationSampling.Equals(other.OperationSampling) { return false }
+ return true
+}
+
+func (p *SamplingStrategyResponse) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
+
+type SamplingManager interface {
+ // Parameters:
+ // - ServiceName
+ GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error)
+}
+
+type SamplingManagerClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+ return &SamplingManagerClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+ return &SamplingManagerClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient {
+ return &SamplingManagerClient{
+ c: c,
+ }
+}
+
+func (p *SamplingManagerClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - ServiceName
+func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) {
+ var _args2 SamplingManagerGetSamplingStrategyArgs
+ _args2.ServiceName = serviceName
+ var _result4 SamplingManagerGetSamplingStrategyResult
+ var _meta3 thrift.ResponseMeta
+ _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4)
+ p.SetLastResponseMeta_(_meta3)
+ if _err != nil {
+ return
+ }
+ return _result4.GetSuccess(), nil
+}
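+
+// A usage sketch (illustrative only, not generator output): fetch the
+// strategy for a service (the thrift.TClient `c` is again assumed) and
+// branch on the reported type:
+//
+//    sm := NewSamplingManagerClient(c)
+//    resp, err := sm.GetSamplingStrategy(context.Background(), "my-service")
+//    if err != nil {
+//        return err
+//    }
+//    switch resp.GetStrategyType() {
+//    case SamplingStrategyType_PROBABILISTIC:
+//        rate := resp.GetProbabilisticSampling().GetSamplingRate()
+//        _ = rate
+//    case SamplingStrategyType_RATE_LIMITING:
+//        max := resp.GetRateLimitingSampling().GetMaxTracesPerSecond()
+//        _ = max
+//    }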
+
+type SamplingManagerProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+  self5 := &SamplingManagerProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+  self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler: handler}
+  return self5
+}
+
+func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x6 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x6.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x6
+
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+ handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := SamplingManagerGetSamplingStrategyArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to perform a server-side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
+
+ result := SamplingManagerGetSamplingStrategyResult{}
+ var retval *SamplingStrategyResponse
+ if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+ ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+ return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+ return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.ServiceName = v
+}
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+ Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+ return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+ if !p.IsSetSuccess() {
+ return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+ }
+return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Success = &SamplingStrategyResponse{}
+ if err := p.Success.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+ }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := p.Success.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 0000000..ebf4301
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+var GoUnusedProtection__ int
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
new file mode 100644
index 0000000..7a924b9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
@@ -0,0 +1,39 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+ "bytes"
+ "context"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const MESSAGE_SEND = "ms"
+const MESSAGE_RECV = "mr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+const MESSAGE_ADDR = "ma"
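+
+// Illustrative pairing (a convention note, not a new definition): a client RPC
+// is delimited by CLIENT_SEND ("cs") and CLIENT_RECV ("cr") annotations; the
+// server side by SERVER_RECV ("sr") and SERVER_SEND ("ss").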
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
new file mode 100644
index 0000000..b00ecd2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
@@ -0,0 +1,1853 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+ "bytes"
+ "context"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "time"
+ "github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type AnnotationType int64
+const (
+ AnnotationType_BOOL AnnotationType = 0
+ AnnotationType_BYTES AnnotationType = 1
+ AnnotationType_I16 AnnotationType = 2
+ AnnotationType_I32 AnnotationType = 3
+ AnnotationType_I64 AnnotationType = 4
+ AnnotationType_DOUBLE AnnotationType = 5
+ AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+ switch p {
+ case AnnotationType_BOOL: return "BOOL"
+ case AnnotationType_BYTES: return "BYTES"
+ case AnnotationType_I16: return "I16"
+ case AnnotationType_I32: return "I32"
+ case AnnotationType_I64: return "I64"
+ case AnnotationType_DOUBLE: return "DOUBLE"
+ case AnnotationType_STRING: return "STRING"
+ }
+ return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+ switch s {
+ case "BOOL": return AnnotationType_BOOL, nil
+ case "BYTES": return AnnotationType_BYTES, nil
+ case "I16": return AnnotationType_I16, nil
+ case "I32": return AnnotationType_I32, nil
+ case "I64": return AnnotationType_I64, nil
+ case "DOUBLE": return AnnotationType_DOUBLE, nil
+ case "STRING": return AnnotationType_STRING, nil
+ }
+ return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+  return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+  q, err := AnnotationTypeFromString(string(text))
+  if err != nil {
+    return err
+  }
+  *p = q
+  return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+  v, ok := value.(int64)
+  if !ok {
+    return errors.New("Scan value is not int64")
+  }
+  *p = AnnotationType(v)
+  return nil
+}
+
+func (p *AnnotationType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+  return int64(*p), nil
+}
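+
+// Illustrative round trip using the helpers above:
+//
+//   t, _ := AnnotationTypeFromString("STRING") // t == AnnotationType_STRING
+//   s := t.String()                            // s == "STRING"
+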
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When the annotation is a BinaryAnnotation whose key is CLIENT_ADDR or
+// SERVER_ADDR, the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+// - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// E.g., for the IP 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+// - Port: IPv4 port
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+//
+// Conventionally, when the port isn't known, port = 0.
+// - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+// - Ipv6: IPv6 host address packed into 16 bytes, e.g. Inet6Address.getBytes()
+type Endpoint struct {
+ Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+ Port int16 `thrift:"port,2" db:"port" json:"port"`
+ ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+ Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
+
+func NewEndpoint() *Endpoint {
+ return &Endpoint{}
+}
+
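+// Illustrative use (not generated code): packing the IPv4 address 1.2.3.4 into
+// the Ipv4 field as described above; packIPv4 is a hypothetical helper.
+//
+//   func packIPv4(a, b, c, d byte) int32 {
+//       return int32(a)<<24 | int32(b)<<16 | int32(c)<<8 | int32(d)
+//   }
+//   ep := &Endpoint{Ipv4: packIPv4(1, 2, 3, 4), Port: 8080, ServiceName: "zipkin-web"}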
+
+func (p *Endpoint) GetIpv4() int32 {
+ return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+ return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+ return p.ServiceName
+}
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+ return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+ return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.I16 {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Endpoint) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Ipv4 = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI16(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Port = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.ServiceName = v
+}
+ return nil
+}
+
+func (p *Endpoint) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.Ipv6 = v
+}
+ return nil
+}
+
+func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+ if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+ return err
+}
+
+func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetIpv6() {
+ if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+ }
+ return err
+}
+
+func (p *Endpoint) Equals(other *Endpoint) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ipv4 != other.Ipv4 { return false }
+ if p.Port != other.Port { return false }
+ if p.ServiceName != other.ServiceName { return false }
+ if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false }
+ return true
+}
+
+func (p *Endpoint) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly, and also makes them aggregatable.
+//
+// Attributes:
+// - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+// - Value
+// - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+ Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+ Value string `thrift:"value,2" db:"value" json:"value"`
+ Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
+
+func NewAnnotation() *Annotation {
+ return &Annotation{}
+}
+
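+// Illustrative sketch (not generated code): recording a SERVER_RECV event at
+// the current time on endpoint ep (from the example above). Timestamps are
+// microseconds since epoch.
+//
+//   ann := &Annotation{
+//       Timestamp: time.Now().UnixNano() / int64(time.Microsecond),
+//       Value:     SERVER_RECV,
+//       Host:      ep,
+//   }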
+
+func (p *Annotation) GetTimestamp() int64 {
+ return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+ return p.Value
+}
+var Annotation_Host_DEFAULT *Endpoint
+func (p *Annotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return Annotation_Host_DEFAULT
+ }
+return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Annotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Timestamp = v
+}
+ return nil
+}
+
+func (p *Annotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Value = v
+}
+ return nil
+}
+
+func (p *Annotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+ return err
+}
+
+func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+ return err
+}
+
+func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+ if err := p.Host.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+ }
+ return err
+}
+
+func (p *Annotation) Equals(other *Annotation) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Timestamp != other.Timestamp { return false }
+ if p.Value != other.Value { return false }
+ if !p.Host.Equals(other.Host) { return false }
+ return true
+}
+
+func (p *Annotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in
+// an RPC call.
+//
+// Binary annotations of type STRING are always queryable, though this is more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+// - Key
+// - Value
+// - AnnotationType
+// - Host: The host that recorded the tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+//
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+ Key string `thrift:"key,1" db:"key" json:"key"`
+ Value []byte `thrift:"value,2" db:"value" json:"value"`
+ AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+ Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+ return &BinaryAnnotation{}
+}
+
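+// Illustrative sketch: tagging a span with the request path, as described
+// above. STRING values travel in the Value byte slice; ep is the example
+// Endpoint from earlier.
+//
+//   tag := &BinaryAnnotation{
+//       Key:            "http.uri",
+//       Value:          []byte("/api/v1/myresource"),
+//       AnnotationType: AnnotationType_STRING,
+//       Host:           ep,
+//   }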
+
+func (p *BinaryAnnotation) GetKey() string {
+ return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+ return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+ return p.AnnotationType
+}
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+ if !p.IsSetHost() {
+ return BinaryAnnotation_Host_DEFAULT
+ }
+return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+ return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField2(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.I32 {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.STRUCT {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Key = v
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBinary(ctx); err != nil {
+ return thrift.PrependError("error reading field 2: ", err)
+} else {
+ p.Value = v
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI32(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ temp := AnnotationType(v)
+ p.AnnotationType = temp
+}
+ return nil
+}
+
+func (p *BinaryAnnotation) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ p.Host = &Endpoint{}
+ if err := p.Host.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+ }
+ return nil
+}
+
+func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField2(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+ if err := oprot.WriteBinary(ctx, p.Value); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+ if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+ return err
+}
+
+func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetHost() {
+ if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+ if err := p.Host.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+ }
+ return err
+}
+
+func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Key != other.Key { return false }
+ if bytes.Compare(p.Value, other.Value) != 0 { return false }
+ if p.AnnotationType != other.AnnotationType { return false }
+ if !p.Host.Equals(other.Host) { return false }
+ return true
+}
+
+func (p *BinaryAnnotation) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+//
+// Attributes:
+// - TraceID
+// - Name: Span name in lowercase, rpc method for example
+//
+// Conventionally, when the span name isn't known, name = "unknown".
+// - ID
+// - ParentID
+// - Annotations
+// - BinaryAnnotations
+// - Debug
+// - Timestamp: Microseconds from epoch of the creation of this span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that predates this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+// - Duration: Measurement of duration in microseconds, used to support queries.
+//
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+//
+// For compatibility with instrumentation that predates this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
+// means the trace uses 128-bit trace ids instead of 64-bit ones.
+type Span struct {
+ TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+ // unused field # 2
+ Name string `thrift:"name,3" db:"name" json:"name"`
+ ID int64 `thrift:"id,4" db:"id" json:"id"`
+ ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+ Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+ // unused field # 7
+ BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+ Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
+ Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+ Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+ TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
+
+func NewSpan() *Span {
+ return &Span{}
+}
+
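+// Illustrative sketch: a root span (ParentID left unset) using 128-bit trace
+// ids, with TraceIDHigh carrying the upper 8 bytes. The values and the ann/tag
+// variables are from the examples above and are arbitrary.
+//
+//   high, low := int64(0x0102030405060708), int64(0x090a0b0c0d0e0f10)
+//   span := &Span{
+//       TraceID:           low,
+//       TraceIDHigh:       &high,
+//       Name:              "get /api/v1/myresource",
+//       ID:                low,
+//       Annotations:       []*Annotation{ann},
+//       BinaryAnnotations: []*BinaryAnnotation{tag},
+//   }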
+
+func (p *Span) GetTraceID() int64 {
+ return p.TraceID
+}
+
+func (p *Span) GetName() string {
+ return p.Name
+}
+
+func (p *Span) GetID() int64 {
+ return p.ID
+}
+var Span_ParentID_DEFAULT int64
+func (p *Span) GetParentID() int64 {
+ if !p.IsSetParentID() {
+ return Span_ParentID_DEFAULT
+ }
+return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+ return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+ return p.BinaryAnnotations
+}
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+ return p.Debug
+}
+var Span_Timestamp_DEFAULT int64
+func (p *Span) GetTimestamp() int64 {
+ if !p.IsSetTimestamp() {
+ return Span_Timestamp_DEFAULT
+ }
+return *p.Timestamp
+}
+var Span_Duration_DEFAULT int64
+func (p *Span) GetDuration() int64 {
+ if !p.IsSetDuration() {
+ return Span_Duration_DEFAULT
+ }
+return *p.Duration
+}
+var Span_TraceIDHigh_DEFAULT int64
+func (p *Span) GetTraceIDHigh() int64 {
+ if !p.IsSetTraceIDHigh() {
+ return Span_TraceIDHigh_DEFAULT
+ }
+return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+ return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+ return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+ return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+ return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+ return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 3:
+ if fieldTypeId == thrift.STRING {
+ if err := p.ReadField3(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 4:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField4(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 5:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField5(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 6:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField6(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 8:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField8(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 9:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField9(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 10:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField10(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 11:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField11(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ case 12:
+ if fieldTypeId == thrift.I64 {
+ if err := p.ReadField12(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.TraceID = v
+}
+ return nil
+}
+
+func (p *Span) ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadString(ctx); err != nil {
+ return thrift.PrependError("error reading field 3: ", err)
+} else {
+ p.Name = v
+}
+ return nil
+}
+
+func (p *Span) ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 4: ", err)
+} else {
+ p.ID = v
+}
+ return nil
+}
+
+func (p *Span) ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 5: ", err)
+} else {
+ p.ParentID = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Annotation, 0, size)
+ p.Annotations = tSlice
+  for i := 0; i < size; i++ {
+ _elem0 := &Annotation{}
+ if err := _elem0.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+ }
+ p.Annotations = append(p.Annotations, _elem0)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*BinaryAnnotation, 0, size)
+ p.BinaryAnnotations = tSlice
+  for i := 0; i < size; i++ {
+ _elem1 := &BinaryAnnotation{}
+ if err := _elem1.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+ }
+ p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *Span) ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 9: ", err)
+} else {
+ p.Debug = v
+}
+ return nil
+}
+
+func (p *Span) ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 10: ", err)
+} else {
+ p.Timestamp = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 11: ", err)
+} else {
+ p.Duration = &v
+}
+ return nil
+}
+
+func (p *Span) ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadI64(ctx); err != nil {
+ return thrift.PrependError("error reading field 12: ", err)
+} else {
+ p.TraceIDHigh = &v
+}
+ return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ if err := p.writeField3(ctx, oprot); err != nil { return err }
+ if err := p.writeField4(ctx, oprot); err != nil { return err }
+ if err := p.writeField5(ctx, oprot); err != nil { return err }
+ if err := p.writeField6(ctx, oprot); err != nil { return err }
+ if err := p.writeField8(ctx, oprot); err != nil { return err }
+ if err := p.writeField9(ctx, oprot); err != nil { return err }
+ if err := p.writeField10(ctx, oprot); err != nil { return err }
+ if err := p.writeField11(ctx, oprot); err != nil { return err }
+ if err := p.writeField12(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
+ if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetParentID() {
+ if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Annotations {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.BinaryAnnotations {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
+ return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDebug() {
+ if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTimestamp() {
+ if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetDuration() {
+ if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetTraceIDHigh() {
+ if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
+ if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
+ }
+ return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.TraceID != other.TraceID { return false }
+ if p.Name != other.Name { return false }
+ if p.ID != other.ID { return false }
+ if p.ParentID != other.ParentID {
+ if p.ParentID == nil || other.ParentID == nil {
+ return false
+ }
+ if (*p.ParentID) != (*other.ParentID) { return false }
+ }
+ if len(p.Annotations) != len(other.Annotations) { return false }
+ for i, _tgt := range p.Annotations {
+ _src2 := other.Annotations[i]
+ if !_tgt.Equals(_src2) { return false }
+ }
+ if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false }
+ for i, _tgt := range p.BinaryAnnotations {
+ _src3 := other.BinaryAnnotations[i]
+ if !_tgt.Equals(_src3) { return false }
+ }
+ if p.Debug != other.Debug { return false }
+ if p.Timestamp != other.Timestamp {
+ if p.Timestamp == nil || other.Timestamp == nil {
+ return false
+ }
+ if (*p.Timestamp) != (*other.Timestamp) { return false }
+ }
+ if p.Duration != other.Duration {
+ if p.Duration == nil || other.Duration == nil {
+ return false
+ }
+ if (*p.Duration) != (*other.Duration) { return false }
+ }
+ if p.TraceIDHigh != other.TraceIDHigh {
+ if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
+ return false
+ }
+ if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false }
+ }
+ return true
+}
+
+func (p *Span) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+// - Ok
+type Response struct {
+ Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewResponse() *Response {
+ return &Response{}
+}
+
+
+func (p *Response) GetOk() bool {
+ return p.Ok
+}
+func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+ var issetOk bool = false;
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+ if fieldTypeId == thrift.STOP { break; }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.BOOL {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ issetOk = true
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ if !issetOk{
+ return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+ }
+ return nil
+}
+
+func (p *Response) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ if v, err := iprot.ReadBool(ctx); err != nil {
+ return thrift.PrependError("error reading field 1: ", err)
+} else {
+ p.Ok = v
+}
+ return nil
+}
+
+func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+ if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+ return err
+}
+
+func (p *Response) Equals(other *Response) bool {
+ if p == other {
+ return true
+ } else if p == nil || other == nil {
+ return false
+ }
+ if p.Ok != other.Ok { return false }
+ return true
+}
+
+func (p *Response) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("Response(%+v)", *p)
+}
+
+type ZipkinCollector interface {
+ // Parameters:
+ // - Spans
+ SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
+}
+
+type ZipkinCollectorClient struct {
+ c thrift.TClient
+ meta thrift.ResponseMeta
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+ }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: thrift.NewTStandardClient(iprot, oprot),
+ }
+}
+
+func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
+ return &ZipkinCollectorClient{
+ c: c,
+ }
+}
+
+func (p *ZipkinCollectorClient) Client_() thrift.TClient {
+ return p.c
+}
+
+func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+ return p.meta
+}
+
+func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+ p.meta = meta
+}
+
+// Parameters:
+// - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
+ var _args4 ZipkinCollectorSubmitZipkinBatchArgs
+ _args4.Spans = spans
+ var _result6 ZipkinCollectorSubmitZipkinBatchResult
+ var _meta5 thrift.ResponseMeta
+ _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
+ p.SetLastResponseMeta_(_meta5)
+ if _err != nil {
+ return
+ }
+ return _result6.GetSuccess(), nil
+}
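+
+// A minimal usage sketch; the transport and protocol choices below are
+// illustrative (NewTMemoryBuffer is assumed to exist in the vendored thrift
+// package, as it does upstream):
+//
+//	trans := thrift.NewTMemoryBuffer()
+//	factory := thrift.NewTBinaryProtocolFactoryConf(nil)
+//	client := NewZipkinCollectorClientFactory(trans, factory)
+//	responses, err := client.SubmitZipkinBatch(context.Background(), spans)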
+
+type ZipkinCollectorProcessor struct {
+ processorMap map[string]thrift.TProcessorFunction
+ handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+ p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+ processor, ok = p.processorMap[key]
+ return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+ return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+  self7 := &ZipkinCollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
+  self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler: handler}
+  return self7
+}
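+
+// Serving this interface only requires a ZipkinCollector implementation; the
+// always-acknowledging handler below is a hypothetical example, not part of
+// the generated code:
+//
+//	type collector struct{}
+//
+//	func (collector) SubmitZipkinBatch(ctx context.Context, spans []*Span) ([]*Response, error) {
+//		out := make([]*Response, len(spans))
+//		for i := range out {
+//			out[i] = &Response{Ok: true}
+//		}
+//		return out, nil
+//	}
+//
+//	processor := NewZipkinCollectorProcessor(collector{})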
+
+func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+ if err2 != nil { return false, thrift.WrapTException(err2) }
+ if processor, ok := p.GetProcessorFunction(name); ok {
+ return processor.Process(ctx, seqId, iprot, oprot)
+ }
+ iprot.Skip(ctx, thrift.STRUCT)
+ iprot.ReadMessageEnd(ctx)
+ x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+ oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+ x8.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, x8
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+ handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+ args := ZipkinCollectorSubmitZipkinBatchArgs{}
+ var err2 error
+ if err2 = args.Read(ctx, iprot); err2 != nil {
+ iprot.ReadMessageEnd(ctx)
+ x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return false, thrift.WrapTException(err2)
+ }
+ iprot.ReadMessageEnd(ctx)
+
+ tickerCancel := func() {}
+ // Start a goroutine to do server side connectivity check.
+ if thrift.ServerConnectivityCheckInterval > 0 {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithCancel(ctx)
+ defer cancel()
+ var tickerCtx context.Context
+ tickerCtx, tickerCancel = context.WithCancel(context.Background())
+ defer tickerCancel()
+ go func(ctx context.Context, cancel context.CancelFunc) {
+ ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+ defer ticker.Stop()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-ticker.C:
+ if !iprot.Transport().IsOpen() {
+ cancel()
+ return
+ }
+ }
+ }
+ }(tickerCtx, cancel)
+ }
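+  // From here on, ctx may be canceled by the goroutine above as soon as the
+  // client transport closes, letting the handler abandon work early;
+  // tickerCancel stops the connectivity check once the handler returns.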
+
+ result := ZipkinCollectorSubmitZipkinBatchResult{}
+ var retval []*Response
+ if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
+ tickerCancel()
+ if err2 == thrift.ErrAbandonRequest {
+ return false, thrift.WrapTException(err2)
+ }
+ x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error())
+ oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+ x.Write(ctx, oprot)
+ oprot.WriteMessageEnd(ctx)
+ oprot.Flush(ctx)
+ return true, thrift.WrapTException(err2)
+ } else {
+ result.Success = retval
+ }
+ tickerCancel()
+ if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+ err = thrift.WrapTException(err2)
+ }
+ if err != nil {
+ return
+ }
+ return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+// - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+ Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+ return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+ return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+  if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 1:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField1(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Span, 0, size)
+ p.Spans = tSlice
+  for i := 0; i < size; i++ {
+ _elem9 := &Span{}
+ if err := _elem9.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+ }
+ p.Spans = append(p.Spans, _elem9)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField1(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Spans {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+ Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+ return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+ return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+ return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+ if _, err := iprot.ReadStructBegin(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+ }
+
+
+ for {
+ _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+ }
+  if fieldTypeId == thrift.STOP { break }
+ switch fieldId {
+ case 0:
+ if fieldTypeId == thrift.LIST {
+ if err := p.ReadField0(ctx, iprot); err != nil {
+ return err
+ }
+ } else {
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ default:
+ if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+ _, size, err := iprot.ReadListBegin(ctx)
+ if err != nil {
+ return thrift.PrependError("error reading list begin: ", err)
+ }
+ tSlice := make([]*Response, 0, size)
+ p.Success = tSlice
+  for i := 0; i < size; i++ {
+ _elem10 := &Response{}
+ if err := _elem10.Read(ctx, iprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+ }
+ p.Success = append(p.Success, _elem10)
+ }
+ if err := iprot.ReadListEnd(ctx); err != nil {
+ return thrift.PrependError("error reading list end: ", err)
+ }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+ if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+ if p != nil {
+ if err := p.writeField0(ctx, oprot); err != nil { return err }
+ }
+ if err := oprot.WriteFieldStop(ctx); err != nil {
+ return thrift.PrependError("write field stop error: ", err) }
+ if err := oprot.WriteStructEnd(ctx); err != nil {
+ return thrift.PrependError("write struct stop error: ", err) }
+ return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+ if p.IsSetSuccess() {
+ if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+ if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+ return thrift.PrependError("error writing list begin: ", err)
+ }
+ for _, v := range p.Success {
+ if err := v.Write(ctx, oprot); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+ }
+ }
+ if err := oprot.WriteListEnd(ctx); err != nil {
+ return thrift.PrependError("error writing list end: ", err)
+ }
+ if err := oprot.WriteFieldEnd(ctx); err != nil {
+ return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+ }
+ return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+ if p == nil {
+ return "<nil>"
+ }
+ return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
new file mode 100644
index 0000000..c4c38ae
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -0,0 +1,11 @@
+# Apache Thrift
+
+This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
+
+The code is vendored to avoid compatibility issues between different Thrift versions.
+
+The file logger.go is modified to remove dependency on "testing" (see Issue #585).
+
+See:
+ * https://github.com/jaegertracing/jaeger-client-go/pull/584
+ * https://github.com/jaegertracing/jaeger-client-go/pull/303
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
new file mode 100644
index 0000000..32d5b01
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+)
+
+const (
+ UNKNOWN_APPLICATION_EXCEPTION = 0
+ UNKNOWN_METHOD = 1
+ INVALID_MESSAGE_TYPE_EXCEPTION = 2
+ WRONG_METHOD_NAME = 3
+ BAD_SEQUENCE_ID = 4
+ MISSING_RESULT = 5
+ INTERNAL_ERROR = 6
+ PROTOCOL_ERROR = 7
+ INVALID_TRANSFORM = 8
+ INVALID_PROTOCOL = 9
+ UNSUPPORTED_CLIENT_TYPE = 10
+)
+
+var defaultApplicationExceptionMessage = map[int32]string{
+ UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception",
+ UNKNOWN_METHOD: "unknown method",
+ INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+ WRONG_METHOD_NAME: "wrong method name",
+ BAD_SEQUENCE_ID: "bad sequence ID",
+ MISSING_RESULT: "missing result",
+ INTERNAL_ERROR: "unknown internal error",
+ PROTOCOL_ERROR: "unknown protocol error",
+ INVALID_TRANSFORM: "Invalid transform",
+ INVALID_PROTOCOL: "Invalid protocol",
+ UNSUPPORTED_CLIENT_TYPE: "Unsupported client type",
+}
+
+// Application level Thrift exception
+type TApplicationException interface {
+ TException
+ TypeId() int32
+ Read(ctx context.Context, iprot TProtocol) error
+ Write(ctx context.Context, oprot TProtocol) error
+}
+
+type tApplicationException struct {
+ message string
+ type_ int32
+}
+
+var _ TApplicationException = (*tApplicationException)(nil)
+
+func (tApplicationException) TExceptionType() TExceptionType {
+ return TExceptionTypeApplication
+}
+
+func (e tApplicationException) Error() string {
+ if e.message != "" {
+ return e.message
+ }
+ return defaultApplicationExceptionMessage[e.type_]
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+ return &tApplicationException{message, type_}
+}
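+
+// A usage sketch: the generated processors report handler failures by
+// writing one of these back to the client as an EXCEPTION message, e.g.
+//
+//	x := NewTApplicationException(INTERNAL_ERROR, "internal error processing request")
+//	x.Write(ctx, oprot)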
+
+func (p *tApplicationException) TypeId() int32 {
+ return p.type_
+}
+
+func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
+ // TODO: this should really be generated by the compiler
+ _, err := iprot.ReadStructBegin(ctx)
+ if err != nil {
+ return err
+ }
+
+ message := ""
+ type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+ for {
+ _, ttype, id, err := iprot.ReadFieldBegin(ctx)
+ if err != nil {
+ return err
+ }
+ if ttype == STOP {
+ break
+ }
+ switch id {
+ case 1:
+ if ttype == STRING {
+ if message, err = iprot.ReadString(ctx); err != nil {
+ return err
+ }
+ } else {
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
+ }
+ }
+ case 2:
+ if ttype == I32 {
+ if type_, err = iprot.ReadI32(ctx); err != nil {
+ return err
+ }
+ } else {
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
+ }
+ }
+ default:
+ if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+ return err
+ }
+ }
+ if err = iprot.ReadFieldEnd(ctx); err != nil {
+ return err
+ }
+ }
+ if err := iprot.ReadStructEnd(ctx); err != nil {
+ return err
+ }
+
+ p.message = message
+ p.type_ = type_
+
+ return nil
+}
+
+func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
+ err = oprot.WriteStructBegin(ctx, "TApplicationException")
+ if len(p.Error()) > 0 {
+ err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteString(ctx, p.Error())
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd(ctx)
+ if err != nil {
+ return
+ }
+ }
+ err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteI32(ctx, p.type_)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldEnd(ctx)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteFieldStop(ctx)
+ if err != nil {
+ return
+ }
+ err = oprot.WriteStructEnd(ctx)
+ return
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
new file mode 100644
index 0000000..45c880d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+type TBinaryProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+ cfg *TConfiguration
+ buffer [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+ return NewTBinaryProtocolConf(t, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+ return NewTBinaryProtocolConf(t, &TConfiguration{
+ TBinaryStrictRead: &strictRead,
+ TBinaryStrictWrite: &strictWrite,
+
+ noPropagation: true,
+ })
+}
+
+func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
+ PropagateTConfiguration(t, conf)
+ p := &TBinaryProtocol{
+ origTransport: t,
+ cfg: conf,
+ }
+ if et, ok := t.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(t)
+ }
+ return p
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+ return NewTBinaryProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+ return NewTBinaryProtocolFactoryConf(&TConfiguration{
+ TBinaryStrictRead: &strictRead,
+ TBinaryStrictWrite: &strictWrite,
+
+ noPropagation: true,
+ })
+}
+
+func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
+ return &TBinaryProtocolFactory{
+ cfg: conf,
+ }
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+ return NewTBinaryProtocolConf(t, p.cfg)
+}
+
+func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+ p.cfg = conf
+}
+
+/**
+ * Writing Methods
+ */
+
+func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+ if p.cfg.GetTBinaryStrictWrite() {
+ version := uint32(VERSION_1) | uint32(typeId)
+ e := p.WriteI32(ctx, int32(version))
+ if e != nil {
+ return e
+ }
+ e = p.WriteString(ctx, name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(ctx, seqId)
+ return e
+ } else {
+ e := p.WriteString(ctx, name)
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(ctx, int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(ctx, seqId)
+ return e
+ }
+}
+
+func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+ e := p.WriteByte(ctx, int8(typeId))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI16(ctx, id)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
+ e := p.WriteByte(ctx, STOP)
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ e := p.WriteByte(ctx, int8(keyType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteByte(ctx, int8(valueType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(ctx, int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ e := p.WriteByte(ctx, int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(ctx, int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ e := p.WriteByte(ctx, int8(elemType))
+ if e != nil {
+ return e
+ }
+ e = p.WriteI32(ctx, int32(size))
+ return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
+ if value {
+ return p.WriteByte(ctx, 1)
+ }
+ return p.WriteByte(ctx, 0)
+}
+
+func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
+ e := p.trans.WriteByte(byte(value))
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
+ v := p.buffer[0:2]
+ binary.BigEndian.PutUint16(v, uint16(value))
+ _, e := p.trans.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
+ v := p.buffer[0:4]
+ binary.BigEndian.PutUint32(v, uint32(value))
+ _, e := p.trans.Write(v)
+ return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
+ v := p.buffer[0:8]
+ binary.BigEndian.PutUint64(v, uint64(value))
+ _, err := p.trans.Write(v)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
+ return p.WriteI64(ctx, int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
+ e := p.WriteI32(ctx, int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.trans.WriteString(value)
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
+ e := p.WriteI32(ctx, int32(len(value)))
+ if e != nil {
+ return e
+ }
+ _, err := p.trans.Write(value)
+ return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
+func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+ size, e := p.ReadI32(ctx)
+ if e != nil {
+ return "", typeId, 0, NewTProtocolException(e)
+ }
+ if size < 0 {
+ typeId = TMessageType(size & 0x0ff)
+ version := int64(int64(size) & VERSION_MASK)
+ if version != VERSION_1 {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+ }
+ name, e = p.ReadString(ctx)
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ seqId, e = p.ReadI32(ctx)
+ if e != nil {
+ return name, typeId, seqId, NewTProtocolException(e)
+ }
+ return name, typeId, seqId, nil
+ }
+ if p.cfg.GetTBinaryStrictRead() {
+ return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+ }
+ name, e2 := p.readStringBody(size)
+ if e2 != nil {
+ return name, typeId, seqId, e2
+ }
+ b, e3 := p.ReadByte(ctx)
+ if e3 != nil {
+ return name, typeId, seqId, e3
+ }
+ typeId = TMessageType(b)
+ seqId, e4 := p.ReadI32(ctx)
+ if e4 != nil {
+ return name, typeId, seqId, e4
+ }
+ return name, typeId, seqId, nil
+}
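+
+// The sign of the first i32 selects between the two wire formats: a
+// strict-mode message starts with a negative version word (VERSION_1 with the
+// message type in the low byte), while the legacy format starts with the
+// non-negative length of the method name, followed by a type byte and the
+// sequence id.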
+
+func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+ return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
+ t, err := p.ReadByte(ctx)
+ typeId = TType(t)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if t != STOP {
+ seqId, err = p.ReadI16(ctx)
+ }
+ return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
+ return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
+ k, e := p.ReadByte(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ kType = TType(k)
+ v, e := p.ReadByte(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ vType = TType(v)
+ size32, e := p.ReadI32(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+ b, e := p.ReadByte(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ return
+}
+
+func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ b, e := p.ReadByte(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ elemType = TType(b)
+ size32, e := p.ReadI32(ctx)
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+ return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
+ b, e := p.ReadByte(ctx)
+ v := true
+ if b != 1 {
+ v = false
+ }
+ return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
+ v, err := p.trans.ReadByte()
+ return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+ buf := p.buffer[0:2]
+ err = p.readAll(ctx, buf)
+ value = int16(binary.BigEndian.Uint16(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+ buf := p.buffer[0:4]
+ err = p.readAll(ctx, buf)
+ value = int32(binary.BigEndian.Uint32(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(ctx, buf)
+ value = int64(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+ buf := p.buffer[0:8]
+ err = p.readAll(ctx, buf)
+ value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+ return value, err
+}
+
+func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
+ size, e := p.ReadI32(ctx)
+ if e != nil {
+ return "", e
+ }
+ err = checkSizeForProtocol(size, p.cfg)
+ if err != nil {
+ return
+ }
+ if size < 0 {
+ err = invalidDataLength
+ return
+ }
+ if size == 0 {
+ return "", nil
+ }
+ if size < int32(len(p.buffer)) {
+ // Avoid allocation on small reads
+ buf := p.buffer[:size]
+ read, e := io.ReadFull(p.trans, buf)
+ return string(buf[:read]), NewTProtocolException(e)
+ }
+
+ return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+ size, e := p.ReadI32(ctx)
+ if e != nil {
+ return nil, e
+ }
+ if err := checkSizeForProtocol(size, p.cfg); err != nil {
+ return nil, err
+ }
+
+ buf, err := safeReadBytes(size, p.trans)
+ return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
+ return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
+ var read int
+ _, deadlineSet := ctx.Deadline()
+ for {
+ read, err = io.ReadFull(p.trans, buf)
+ if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
+ // This is I/O timeout without anything read,
+ // and we still have time left, keep retrying.
+ continue
+ }
+ // For anything else, don't retry
+ break
+ }
+ return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+ buf, err := safeReadBytes(size, p.trans)
+ return string(buf), NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+ PropagateTConfiguration(p.origTransport, conf)
+ p.cfg = conf
+}
+
+var (
+ _ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
+ _ TConfigurationSetter = (*TBinaryProtocol)(nil)
+)
+
+// This function is shared between TBinaryProtocol and TCompactProtocol.
+//
+// It tries to read size bytes from trans, in a way that prevents large
+// allocations when size is insanely large (mostly caused by malformed message).
+func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
+ if size < 0 {
+ return nil, nil
+ }
+
+ buf := new(bytes.Buffer)
+ _, err := io.CopyN(buf, trans, int64(size))
+ return buf.Bytes(), err
+}
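+
+// Growing a bytes.Buffer via io.CopyN means memory is allocated in proportion
+// to the bytes actually read rather than the claimed size, so a malformed
+// message declaring a huge length cannot force a huge up-front allocation.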
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
new file mode 100644
index 0000000..ea2c01f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
@@ -0,0 +1,109 @@
+package thrift
+
+import (
+ "context"
+ "fmt"
+)
+
+// ResponseMeta represents the metadata attached to the response.
+type ResponseMeta struct {
+ // The headers in the response, if any.
+ // If the underlying transport/protocol is not THeader, this will always be nil.
+ Headers THeaderMap
+}
+
+type TClient interface {
+ Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+type TStandardClient struct {
+ seqId int32
+ iprot, oprot TProtocol
+}
+
+// NewTStandardClient creates a TStandardClient, which implements TClient
+// using the standard Thrift message format. It is not safe for concurrent use.
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+ return &TStandardClient{
+ iprot: inputProtocol,
+ oprot: outputProtocol,
+ }
+}
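+
+// A minimal sketch of direct use; the in-memory transport is illustrative
+// (NewTMemoryBuffer exists in upstream Thrift, and this partial copy is
+// assumed to include it):
+//
+//	buf := NewTMemoryBuffer()
+//	proto := NewTBinaryProtocolConf(buf, nil)
+//	client := NewTStandardClient(proto, proto)
+//	meta, err := client.Call(ctx, "method", args, result)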
+
+func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
+ // Set headers from context object on THeaderProtocol
+ if headerProt, ok := oprot.(*THeaderProtocol); ok {
+ headerProt.ClearWriteHeaders()
+ for _, key := range GetWriteHeaderList(ctx) {
+ if value, ok := GetHeader(ctx, key); ok {
+ headerProt.SetWriteHeader(key, value)
+ }
+ }
+ }
+
+ if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
+ return err
+ }
+ if err := args.Write(ctx, oprot); err != nil {
+ return err
+ }
+ if err := oprot.WriteMessageEnd(ctx); err != nil {
+ return err
+ }
+ return oprot.Flush(ctx)
+}
+
+func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
+ rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
+ if err != nil {
+ return err
+ }
+
+ if method != rMethod {
+ return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+ } else if seqId != rSeqId {
+ return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+ } else if rTypeId == EXCEPTION {
+ var exception tApplicationException
+ if err := exception.Read(ctx, iprot); err != nil {
+ return err
+ }
+
+ if err := iprot.ReadMessageEnd(ctx); err != nil {
+ return err
+ }
+
+ return &exception
+ } else if rTypeId != REPLY {
+ return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+ }
+
+ if err := result.Read(ctx, iprot); err != nil {
+ return err
+ }
+
+ return iprot.ReadMessageEnd(ctx)
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+ p.seqId++
+ seqId := p.seqId
+
+ if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
+ return ResponseMeta{}, err
+ }
+
+ // method is oneway
+ if result == nil {
+ return ResponseMeta{}, nil
+ }
+
+ err := p.Recv(ctx, p.iprot, seqId, method, result)
+ var headers THeaderMap
+ if hp, ok := p.iprot.(*THeaderProtocol); ok {
+ headers = hp.transport.readHeaders
+ }
+ return ResponseMeta{
+ Headers: headers,
+ }, err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
new file mode 100644
index 0000000..a49225d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+)
+
+const (
+ COMPACT_PROTOCOL_ID = 0x082
+ COMPACT_VERSION = 1
+ COMPACT_VERSION_MASK = 0x1f
+ COMPACT_TYPE_MASK = 0x0E0
+ COMPACT_TYPE_BITS = 0x07
+ COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+ COMPACT_BOOLEAN_TRUE = 0x01
+ COMPACT_BOOLEAN_FALSE = 0x02
+ COMPACT_BYTE = 0x03
+ COMPACT_I16 = 0x04
+ COMPACT_I32 = 0x05
+ COMPACT_I64 = 0x06
+ COMPACT_DOUBLE = 0x07
+ COMPACT_BINARY = 0x08
+ COMPACT_LIST = 0x09
+ COMPACT_SET = 0x0A
+ COMPACT_MAP = 0x0B
+ COMPACT_STRUCT = 0x0C
+)
+
+var (
+ ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+ ttypeToCompactType = map[TType]tCompactType{
+ STOP: STOP,
+ BOOL: COMPACT_BOOLEAN_TRUE,
+ BYTE: COMPACT_BYTE,
+ I16: COMPACT_I16,
+ I32: COMPACT_I32,
+ I64: COMPACT_I64,
+ DOUBLE: COMPACT_DOUBLE,
+ STRING: COMPACT_BINARY,
+ LIST: COMPACT_LIST,
+ SET: COMPACT_SET,
+ MAP: COMPACT_MAP,
+ STRUCT: COMPACT_STRUCT,
+ }
+}
+
+type TCompactProtocolFactory struct {
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTCompactProtocolFactoryConf instead.
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+ return NewTCompactProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
+}
+
+func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
+ return &TCompactProtocolFactory{
+ cfg: conf,
+ }
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTCompactProtocolConf(trans, p.cfg)
+}
+
+func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+ p.cfg = conf
+}
+
+type TCompactProtocol struct {
+ trans TRichTransport
+ origTransport TTransport
+
+ cfg *TConfiguration
+
+ // Used to keep track of the last field for the current and previous structs,
+ // so we can do the delta stuff.
+ lastField []int
+ lastFieldId int
+
+ // If we encounter a boolean field begin, save the TField here so it can
+ // have the value incorporated.
+ booleanFieldName string
+ booleanFieldId int16
+ booleanFieldPending bool
+
+ // If we read a field header, and it's a boolean field, save the boolean
+ // value here so that readBool can use it.
+ boolValue bool
+ boolValueIsNotNull bool
+ buffer [64]byte
+}
+
+// Deprecated: Use NewTCompactProtocolConf instead.
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+ return NewTCompactProtocolConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
+ PropagateTConfiguration(trans, conf)
+ p := &TCompactProtocol{
+ origTransport: trans,
+ cfg: conf,
+ }
+ if et, ok := trans.(TRichTransport); ok {
+ p.trans = et
+ } else {
+ p.trans = NewTRichTransport(trans)
+ }
+
+ return p
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
+ err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ _, err = p.writeVarint32(seqid)
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ e := p.WriteString(ctx, name)
+ return e
+}
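+
+// As a worked example, a CALL (type 1) message with seqid 1 begins on the
+// wire as:
+//
+//	0x82  protocol id
+//	0x21  version 1 in the low 5 bits, message type 1 in bits 5-7
+//	0x01  seqid as a plain varint
+//
+// followed by the method name written as a varint length plus UTF-8 bytes.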
+
+func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
+ if len(p.lastField) <= 0 {
+ return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
+ }
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+ if typeId == BOOL {
+ // we want to possibly include the value, so we'll wait.
+ p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+ return nil
+ }
+ _, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
+ return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
+ // short lastField = lastField_.pop();
+
+ // if there's a type override, use that.
+ var typeToWrite byte
+ if typeOverride == 0xFF {
+ typeToWrite = byte(p.getCompactType(typeId))
+ } else {
+ typeToWrite = typeOverride
+ }
+ // check if we can use delta encoding for the field id
+ fieldId := int(id)
+ written := 0
+ if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+ // write them together
+ err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ } else {
+ // write them separate
+ err := p.writeByteDirect(typeToWrite)
+ if err != nil {
+ return 0, err
+ }
+ err = p.WriteI16(ctx, id)
+ written = 1 + 2
+ if err != nil {
+ return 0, err
+ }
+ }
+
+ p.lastFieldId = fieldId
+ return written, nil
+}
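+
+// A worked example of the delta encoding above: writing field id 2 of type
+// i32 immediately after field id 1 emits the single byte 0x15 (delta 1 in
+// the high nibble, COMPACT_I32 = 0x05 in the low nibble); a jump of more
+// than 15 falls back to a full type byte followed by the zigzag-varint id.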
+
+func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
+ err := p.writeByteDirect(STOP)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ if size == 0 {
+ err := p.writeByteDirect(0)
+ return NewTProtocolException(err)
+ }
+ _, err := p.writeVarint32(int32(size))
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ _, err := p.writeCollectionBegin(elemType, size)
+ return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
+ v := byte(COMPACT_BOOLEAN_FALSE)
+ if value {
+ v = byte(COMPACT_BOOLEAN_TRUE)
+ }
+ if p.booleanFieldPending {
+ // we haven't written the field header yet
+ _, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
+ p.booleanFieldPending = false
+ return NewTProtocolException(err)
+ }
+ // we're not part of a field, so just write the value.
+ err := p.writeByteDirect(v)
+ return NewTProtocolException(err)
+}
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
+ err := p.writeByteDirect(byte(value))
+ return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+ return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
+ _, err := p.writeVarint32(p.int32ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
+ _, err := p.writeVarint64(p.int64ToZigzag(value))
+ return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
+ buf := p.buffer[0:8]
+ binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+ _, err := p.trans.Write(buf)
+ return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
+ _, e := p.writeVarint32(int32(len(value)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ _, e = p.trans.WriteString(value)
+ return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
+ _, e := p.writeVarint32(int32(len(bin)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ if len(bin) > 0 {
+ _, e = p.trans.Write(bin)
+ return NewTProtocolException(e)
+ }
+ return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+ var protocolId byte
+
+ _, deadlineSet := ctx.Deadline()
+ for {
+ protocolId, err = p.readByteDirect()
+ if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+ // keep retrying I/O timeout errors since we still have
+ // time left
+ continue
+ }
+ // For anything else, don't retry
+ break
+ }
+ if err != nil {
+ return
+ }
+
+ if protocolId != COMPACT_PROTOCOL_ID {
+ e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+ return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+ }
+
+ versionAndType, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ version := versionAndType & COMPACT_VERSION_MASK
+ typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+ if version != COMPACT_VERSION {
+ e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+ err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+ return
+ }
+ seqId, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ name, err = p.ReadString(ctx)
+ return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
+ // consume the last field we read off the wire.
+ if len(p.lastField) <= 0 {
+ return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
+ }
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
+ t, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ // if it's a stop, then we can return immediately, as the struct is over.
+ if (t & 0x0f) == STOP {
+ return "", STOP, 0, nil
+ }
+
+ // mask off the 4 MSB of the type header. it could contain a field id delta.
+ modifier := int16((t & 0xf0) >> 4)
+ if modifier == 0 {
+ // not a delta. look ahead for the zigzag varint field id.
+ id, err = p.ReadI16(ctx)
+ if err != nil {
+ return
+ }
+ } else {
+ // has a delta. add the delta to the last read field id.
+ id = int16(p.lastFieldId) + modifier
+ }
+ typeId, e := p.getTType(tCompactType(t & 0x0f))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+
+ // if this happens to be a boolean field, the value is encoded in the type
+ if p.isBoolType(t) {
+ // save the boolean value in a special instance variable.
+ p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+ p.boolValueIsNotNull = true
+ }
+
+ // push the new field onto the field stack so we can keep the deltas going.
+ p.lastFieldId = int(id)
+ return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+ size32, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size32 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size32)
+
+ keyAndValueType := byte(STOP)
+ if size != 0 {
+ keyAndValueType, err = p.readByteDirect()
+ if err != nil {
+ return
+ }
+ }
+ keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+ valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+ return
+}
+
+func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+ size_and_type, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+ size = int((size_and_type >> 4) & 0x0f)
+ if size == 15 {
+ size2, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ if size2 < 0 {
+ err = invalidDataLength
+ return
+ }
+ size = int(size2)
+ }
+ elemType, e := p.getTType(tCompactType(size_and_type))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ return
+}
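+
+// For example, a 3-element list of i32 elements begins with the single byte
+// 0x35 (size 3 in the high nibble, COMPACT_I32 = 0x05 in the low nibble),
+// while a 20-element list begins with 0xF5 followed by the varint 0x14.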
+
+func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.ReadListBegin(ctx)
+}
+
+func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+ if p.boolValueIsNotNull {
+ p.boolValueIsNotNull = false
+ return p.boolValue, nil
+ }
+ v, err := p.readByteDirect()
+ return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
+ v, err := p.readByteDirect()
+ if err != nil {
+ return 0, NewTProtocolException(err)
+ }
+ return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+ v, err := p.ReadI32(ctx)
+ return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+ v, e := p.readVarint32()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt32(v)
+ return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+ v, e := p.readVarint64()
+ if e != nil {
+ return 0, NewTProtocolException(e)
+ }
+ value = p.zigzagToInt64(v)
+ return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+ longBits := p.buffer[0:8]
+ _, e := io.ReadFull(p.trans, longBits)
+ if e != nil {
+ return 0.0, NewTProtocolException(e)
+ }
+ return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Reads a []byte (via readBinary), and then UTF-8 decodes it.
+func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return "", NewTProtocolException(e)
+ }
+ err = checkSizeForProtocol(length, p.cfg)
+ if err != nil {
+ return
+ }
+ if length == 0 {
+ return "", nil
+ }
+ if length < int32(len(p.buffer)) {
+ // Avoid allocation on small reads
+ buf := p.buffer[:length]
+ read, e := io.ReadFull(p.trans, buf)
+ return string(buf[:read]), NewTProtocolException(e)
+ }
+
+ buf, e := safeReadBytes(length, p.trans)
+ return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+ length, e := p.readVarint32()
+ if e != nil {
+ return nil, NewTProtocolException(e)
+ }
+ err = checkSizeForProtocol(length, p.cfg)
+ if err != nil {
+ return
+ }
+ if length == 0 {
+ return []byte{}, nil
+ }
+
+ buf, e := safeReadBytes(length, p.trans)
+ return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
+ return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+ return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+ if size <= 14 {
+ return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+ }
+ err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+ if err != nil {
+ return 0, err
+ }
+ m, err := p.writeVarint32(int32(size))
+ return 1 + m, err
+}
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+ i32buf := p.buffer[0:5]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ i32buf[idx] = byte(n)
+ idx++
+ break
+ } else {
+ i32buf[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ u := uint32(n)
+ n = int32(u >> 7)
+ }
+ }
+ return p.trans.Write(i32buf[0:idx])
+}
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+ varint64out := p.buffer[0:10]
+ idx := 0
+ for {
+ if (n & ^0x7F) == 0 {
+ varint64out[idx] = byte(n)
+ idx++
+ break
+ } else {
+ varint64out[idx] = byte((n & 0x7F) | 0x80)
+ idx++
+ u := uint64(n)
+ n = int64(u >> 7)
+ }
+ }
+ return p.trans.Write(varint64out[0:idx])
+}
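+
+// As a minimal illustration (assumed values, not part of the upstream API):
+// the varint encoding above emits 7 bits per byte, least-significant group
+// first, setting the MSB on every byte except the last. For example, 300
+// encodes as 0xAC 0x02:
+//
+// 0xAC == byte((300 & 0x7F) | 0x80)
+// 0x02 == byte(300 >> 7)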
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+ return (l << 1) ^ (l >> 63)
+}
+
+// Convert n into a zigzag int. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+ return (n << 1) ^ (n >> 31)
+}
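+
+// zigzagExample is a small illustrative sketch (not part of the upstream
+// API) of the zigzag mapping: 0->0, -1->1, 1->2, -2->3, 2->4, so values
+// near zero stay small when varint-encoded.
+func zigzagExample() {
+ p := &TCompactProtocol{}
+ z := p.int32ToZigzag(-2) // 3
+ _ = p.zigzagToInt32(z) // back to -2
+}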
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+ return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+ return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+ // If the wire contains valid data, truncating the i64 we read
+ // yields the correct value and sign.
+ v, err := p.readVarint64()
+ return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+ shift := uint(0)
+ result := int64(0)
+ for {
+ b, err := p.readByteDirect()
+ if err != nil {
+ return 0, err
+ }
+ result |= int64(b&0x7f) << shift
+ if (b & 0x80) != 0x80 {
+ break
+ }
+ shift += 7
+ }
+ return result, nil
+}
+
+// readByteDirect reads a raw byte, unlike ReadByte, which reads a Thrift
+// byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+ return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+ u := uint32(n)
+ return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+ u := uint64(n)
+ return int64(u>>1) ^ -(n & 1)
+}
+
+// bytesToInt64 decodes b as a little-endian int64.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+ return int64(binary.LittleEndian.Uint64(b))
+}
+
+// bytesToUint64 decodes b as a little-endian uint64.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+ return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+ return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+ switch byte(t) & 0x0f {
+ case STOP:
+ return STOP, nil
+ case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+ return BOOL, nil
+ case COMPACT_BYTE:
+ return BYTE, nil
+ case COMPACT_I16:
+ return I16, nil
+ case COMPACT_I32:
+ return I32, nil
+ case COMPACT_I64:
+ return I64, nil
+ case COMPACT_DOUBLE:
+ return DOUBLE, nil
+ case COMPACT_BINARY:
+ return STRING, nil
+ case COMPACT_LIST:
+ return LIST, nil
+ case COMPACT_SET:
+ return SET, nil
+ case COMPACT_MAP:
+ return MAP, nil
+ case COMPACT_STRUCT:
+ return STRUCT, nil
+ }
+ return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+ return ttypeToCompactType[t]
+}
+
+func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+ PropagateTConfiguration(p.origTransport, conf)
+ p.cfg = conf
+}
+
+var (
+ _ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
+ _ TConfigurationSetter = (*TCompactProtocol)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
new file mode 100644
index 0000000..454d9f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "crypto/tls"
+ "fmt"
+ "time"
+)
+
+// Default TConfiguration values.
+const (
+ DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
+ DEFAULT_MAX_FRAME_SIZE = 16384000
+
+ DEFAULT_TBINARY_STRICT_READ = false
+ DEFAULT_TBINARY_STRICT_WRITE = true
+
+ DEFAULT_CONNECT_TIMEOUT = 0
+ DEFAULT_SOCKET_TIMEOUT = 0
+)
+
+// TConfiguration defines some configurations shared between TTransport,
+// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
+//
+// When constructing TConfiguration, you only need to specify the non-default
+// fields. All zero values have sane default values.
+//
+// Not all configurations defined are applicable to all implementations.
+// Implementations are free to ignore the configurations not applicable to them.
+//
+// All functions attached to this type are nil-safe.
+//
+// See [1] for spec.
+//
+// NOTE: When using TConfiguration, fill in all the configurations you want to
+// set across the stack, not only the ones you want to set in the immediate
+// TTransport/TProtocol.
+//
+// For example, say you want to migrate this old code into using TConfiguration:
+//
+// socket := thrift.NewTSocketTimeout("host:port", time.Second)
+// transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+// thrift.NewTTransportFactory(),
+// 1024 * 1024 * 256,
+// )
+// protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//
+// This is the wrong way to do it because in the end the TConfiguration used by
+// socket and transFactory will be overwritten by the one used by protoFactory
+// because of TConfiguration propagation:
+//
+// // bad example, DO NOT USE
+// socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+// ConnectTimeout: time.Second,
+// SocketTimeout: time.Second,
+// })
+// transFactory := thrift.NewTFramedTransportFactoryConf(
+// thrift.NewTTransportFactory(),
+// &thrift.TConfiguration{
+// MaxFrameSize: 1024 * 1024 * 256,
+// },
+// )
+// protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+// TBinaryStrictRead: thrift.BoolPtr(true),
+// TBinaryStrictWrite: thrift.BoolPtr(true),
+// })
+//
+// This is the correct way to do it:
+//
+// conf := &thrift.TConfiguration{
+// ConnectTimeout: time.Second,
+// SocketTimeout: time.Second,
+//
+// MaxFrameSize: 1024 * 1024 * 256,
+//
+// TBinaryStrictRead: thrift.BoolPtr(true),
+// TBinaryStrictWrite: thrift.BoolPtr(true),
+// }
+// sccket := thrift.NewTSocketConf("host:port", conf)
+// transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+// protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//
+// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
+type TConfiguration struct {
+ // If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
+ MaxMessageSize int32
+
+ // If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
+ //
+ // Also if MaxMessageSize < MaxFrameSize,
+ // MaxMessageSize will be used instead.
+ MaxFrameSize int32
+
+ // Connect and socket timeouts to be used by TSocket and TSSLSocket.
+ //
+ // 0 means no timeout.
+ //
+ // If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
+ // used.
+ ConnectTimeout time.Duration
+ SocketTimeout time.Duration
+
+ // TLS config to be used by TSSLSocket.
+ TLSConfig *tls.Config
+
+ // Strict read/write configurations for TBinaryProtocol.
+ //
+ // The BoolPtr helper function is available for setting these from
+ // literal values.
+ TBinaryStrictRead *bool
+ TBinaryStrictWrite *bool
+
+ // The wrapped protocol id to be used in THeader transport/protocol.
+ //
+ // The THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper
+ // functions are provided to help fill this value.
+ THeaderProtocolID *THeaderProtocolID
+
+ // Used internally by deprecated constructors, to avoid overriding the
+ // underlying TTransport/TProtocol's cfg by accidental propagation.
+ //
+ // For external users this is always false.
+ noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+ if tc == nil || tc.MaxMessageSize <= 0 {
+ return DEFAULT_MAX_MESSAGE_SIZE
+ }
+ return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+ if tc == nil {
+ return DEFAULT_MAX_FRAME_SIZE
+ }
+ maxFrameSize := tc.MaxFrameSize
+ if maxFrameSize <= 0 {
+ maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+ }
+ if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+ return maxMessageSize
+ }
+ return maxFrameSize
+}
+
+// GetConnectTimeout returns the connect timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+ if tc == nil || tc.ConnectTimeout < 0 {
+ return DEFAULT_CONNECT_TIMEOUT
+ }
+ return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+ if tc == nil || tc.SocketTimeout < 0 {
+ return DEFAULT_SOCKET_TIMEOUT
+ }
+ return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+ if tc == nil {
+ return nil
+ }
+ return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+ if tc == nil || tc.TBinaryStrictRead == nil {
+ return DEFAULT_TBINARY_STRICT_READ
+ }
+ return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration TBinaryProtocol
+// should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+ if tc == nil || tc.TBinaryStrictWrite == nil {
+ return DEFAULT_TBINARY_STRICT_WRITE
+ }
+ return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (servers always use the same one as the client
+// instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is
+// invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+ if tc == nil || tc.THeaderProtocolID == nil {
+ return THeaderProtocolDefault
+ }
+ protoID := *tc.THeaderProtocolID
+ if err := protoID.Validate(); err != nil {
+ return THeaderProtocolDefault
+ }
+ return protoID
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+ err := id.Validate()
+ if err != nil {
+ id = THeaderProtocolDefault
+ }
+ return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+ ptr, err := THeaderProtocolIDPtr(id)
+ if err != nil {
+ panic(err)
+ }
+ return ptr
+}
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initialization.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the current processing message,
+// and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick succession due to how the propagation is
+// implemented. Implementations should keep SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+ SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with everything being default value, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
+ if cfg == nil || cfg.noPropagation {
+ return
+ }
+
+ if setter, ok := impl.(TConfigurationSetter); ok {
+ setter.SetTConfiguration(cfg)
+ }
+}
+
+func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
+ if size < 0 {
+ return NewTProtocolExceptionWithType(
+ NEGATIVE_SIZE,
+ fmt.Errorf("negative size: %d", size),
+ )
+ }
+ if size > cfg.GetMaxMessageSize() {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ fmt.Errorf("size exceeded max allowed: %d", size),
+ )
+ }
+ return nil
+}
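+
+// An illustrative note: because the TConfiguration getters are nil-safe,
+// callers may pass a nil cfg and still get the default limits, e.g.
+//
+// err := checkSizeForProtocol(1024, nil) // nil, within DEFAULT_MAX_MESSAGE_SIZE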
+
+type tTransportFactoryConf struct {
+ delegate TTransportFactory
+ cfg *TConfiguration
+}
+
+func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
+ trans, err := f.delegate.GetTransport(orig)
+ if err == nil {
+ PropagateTConfiguration(orig, f.cfg)
+ PropagateTConfiguration(trans, f.cfg)
+ }
+ return trans, err
+}
+
+func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.delegate, f.cfg)
+ f.cfg = cfg
+}
+
+// TTransportFactoryConf wraps a TTransportFactory to propagate
+// TConfiguration on the factory's GetTransport calls.
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
+ return &tTransportFactoryConf{
+ delegate: delegate,
+ cfg: conf,
+ }
+}
+
+type tProtocolFactoryConf struct {
+ delegate TProtocolFactory
+ cfg *TConfiguration
+}
+
+func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
+ proto := f.delegate.GetProtocol(trans)
+ PropagateTConfiguration(trans, f.cfg)
+ PropagateTConfiguration(proto, f.cfg)
+ return proto
+}
+
+func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.delegate, f.cfg)
+ f.cfg = cfg
+}
+
+// TProtocolFactoryConf wraps a TProtocolFactory to propagate
+// TConfiguration on the factory's GetProtocol calls.
+func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
+ return &tProtocolFactoryConf{
+ delegate: delegate,
+ cfg: conf,
+ }
+}
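+
+// A minimal usage sketch (assumed, not from upstream docs): wrap an existing
+// factory so every protocol it creates picks up the shared configuration:
+//
+// conf := &TConfiguration{MaxMessageSize: 10 * 1024 * 1024}
+// factory := TProtocolFactoryConf(NewTBinaryProtocolFactoryConf(conf), conf)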
+
+var (
+ _ TConfigurationSetter = (*tTransportFactoryConf)(nil)
+ _ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/context.go b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
new file mode 100644
index 0000000..d15c1bc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
new file mode 100644
index 0000000..53bf862
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+)
+
+// TException is the generic Thrift exception interface.
+type TException interface {
+ error
+
+ TExceptionType() TExceptionType
+}
+
+// PrependError prepends additional information to an error without losing the
+// Thrift exception interface.
+func PrependError(prepend string, err error) error {
+ msg := prepend + err.Error()
+
+ var te TException
+ if errors.As(err, &te) {
+ switch te.TExceptionType() {
+ case TExceptionTypeTransport:
+ if t, ok := err.(TTransportException); ok {
+ return prependTTransportException(prepend, t)
+ }
+ case TExceptionTypeProtocol:
+ if t, ok := err.(TProtocolException); ok {
+ return prependTProtocolException(prepend, t)
+ }
+ case TExceptionTypeApplication:
+ var t TApplicationException
+ if errors.As(err, &t) {
+ return NewTApplicationException(t.TypeId(), msg)
+ }
+ }
+
+ return wrappedTException{
+ err: err,
+ msg: msg,
+ tExceptionType: te.TExceptionType(),
+ }
+ }
+
+ return errors.New(msg)
+}
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+ TExceptionTypeUnknown TExceptionType = iota
+ TExceptionTypeCompiled // TExceptions defined in thrift files and generated by thrift compiler
+ TExceptionTypeApplication // TApplicationExceptions
+ TExceptionTypeProtocol // TProtocolExceptions
+ TExceptionTypeTransport // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already TException, it's returned as-is.
+// Otherwise it will be wrapped into TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+ if err == nil {
+ return nil
+ }
+
+ if te, ok := err.(TException); ok {
+ return te
+ }
+
+ return wrappedTException{
+ err: err,
+ msg: err.Error(),
+ tExceptionType: TExceptionTypeUnknown,
+ }
+}
+
+type wrappedTException struct {
+ err error
+ msg string
+ tExceptionType TExceptionType
+}
+
+func (w wrappedTException) Error() string {
+ return w.msg
+}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+ return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+ return w.err
+}
+
+var _ TException = wrappedTException{}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
new file mode 100644
index 0000000..ac9bd48
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type (
+ headerKey string
+ headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+ headerKeyListRead headerKeyList = iota
+ headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKey(key),
+ value,
+ )
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKey(key),
+ nil,
+ )
+}
+
+// GetHeader returns a value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+ if v := ctx.Value(headerKey(key)); v != nil {
+ value, ok = v.(string)
+ }
+ return
+}
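+
+// headerContextExample is a minimal usage sketch (not part of the upstream
+// API): headers set on a context round-trip through GetHeader.
+func headerContextExample(ctx context.Context) {
+ ctx = SetHeader(ctx, "trace-id", "abc123")
+ if v, ok := GetHeader(ctx, "trace-id"); ok {
+ _ = v // "abc123"
+ }
+}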
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKeyListRead,
+ keys,
+ )
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+ if v := ctx.Value(headerKeyListRead); v != nil {
+ if value, ok := v.([]string); ok {
+ return value
+ }
+ }
+ return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+ return context.WithValue(
+ ctx,
+ headerKeyListWrite,
+ keys,
+ )
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+ if v := ctx.Value(headerKeyListWrite); v != nil {
+ if value, ok := v.([]string); ok {
+ return value
+ }
+ }
+ return nil
+}
+
+// AddReadTHeaderToContext adds the whole THeader headers into context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+ keys := make([]string, 0, len(headers))
+ for key, value := range headers {
+ ctx = SetHeader(ctx, key, value)
+ keys = append(keys, key)
+ }
+ return SetReadHeaderList(ctx, keys)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
new file mode 100644
index 0000000..878041f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+ transport *THeaderTransport
+
+ // Will be initialized on first read/write.
+ protocol TProtocol
+
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+ return newTHeaderProtocolConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with given TConfiguration.
+//
+// The passed in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or
+// TSSLSocket), not a rich transport like TZlibTransport or TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+ return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+ t := NewTHeaderTransportConf(trans, cfg)
+ p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+ PropagateTConfiguration(p, cfg)
+ return &THeaderProtocol{
+ transport: t,
+ protocol: p,
+ cfg: cfg,
+ }
+}
+
+type tHeaderProtocolFactory struct {
+ cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+ f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+ return NewTHeaderProtocolFactoryConf(&TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+ return tHeaderProtocolFactory{
+ cfg: conf,
+ }
+}
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+ return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+ return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+ p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+ p.transport.ClearWriteHeaders()
+}
+
+// AddTransform adds a transform for writing.
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
+ return p.transport.AddTransform(transform)
+}
+
+func (p *THeaderProtocol) Flush(ctx context.Context) error {
+ return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
+ newProto, err := p.transport.Protocol().GetProtocol(p.transport)
+ if err != nil {
+ return err
+ }
+ PropagateTConfiguration(newProto, p.cfg)
+ p.protocol = newProto
+ p.transport.SequenceID = seqID
+ return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
+}
+
+func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
+ if err := p.protocol.WriteMessageEnd(ctx); err != nil {
+ return err
+ }
+ return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
+ return p.protocol.WriteStructBegin(ctx, name)
+}
+
+func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
+ return p.protocol.WriteStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
+ return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
+}
+
+func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
+ return p.protocol.WriteFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
+ return p.protocol.WriteFieldStop(ctx)
+}
+
+func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
+}
+
+func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
+ return p.protocol.WriteMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ return p.protocol.WriteListBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
+ return p.protocol.WriteListEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ return p.protocol.WriteSetBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
+ return p.protocol.WriteSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
+ return p.protocol.WriteBool(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
+ return p.protocol.WriteByte(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
+ return p.protocol.WriteI16(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
+ return p.protocol.WriteI32(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
+ return p.protocol.WriteI64(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
+ return p.protocol.WriteDouble(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
+ return p.protocol.WriteString(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
+ return p.protocol.WriteBinary(ctx, value)
+}
+
+// ReadFrame calls the underlying THeaderTransport's ReadFrame function.
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
+ return p.transport.ReadFrame(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
+ if err = p.transport.ReadFrame(ctx); err != nil {
+ return
+ }
+
+ var newProto TProtocol
+ newProto, err = p.transport.Protocol().GetProtocol(p.transport)
+ if err != nil {
+ var tAppExc TApplicationException
+ if !errors.As(err, &tAppExc) {
+ return
+ }
+ if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
+ return
+ }
+ if e := tAppExc.Write(ctx, p.protocol); e != nil {
+ return
+ }
+ if e := p.protocol.WriteMessageEnd(ctx); e != nil {
+ return
+ }
+ if e := p.transport.Flush(ctx); e != nil {
+ return
+ }
+ return
+ }
+ PropagateTConfiguration(newProto, p.cfg)
+ p.protocol = newProto
+
+ return p.protocol.ReadMessageBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
+ return p.protocol.ReadMessageEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+ return p.protocol.ReadStructBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
+ return p.protocol.ReadStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
+ return p.protocol.ReadFieldBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
+ return p.protocol.ReadFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+ return p.protocol.ReadMapBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
+ return p.protocol.ReadMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.protocol.ReadListBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
+ return p.protocol.ReadListEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+ return p.protocol.ReadSetBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
+ return p.protocol.ReadSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+ return p.protocol.ReadBool(ctx)
+}
+
+func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+ return p.protocol.ReadByte(ctx)
+}
+
+func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+ return p.protocol.ReadI16(ctx)
+}
+
+func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+ return p.protocol.ReadI32(ctx)
+}
+
+func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+ return p.protocol.ReadI64(ctx)
+}
+
+func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+ return p.protocol.ReadDouble(ctx)
+}
+
+func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
+ return p.protocol.ReadString(ctx)
+}
+
+func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+ return p.protocol.ReadBinary(ctx)
+}
+
+func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
+ return p.protocol.Skip(ctx, fieldType)
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(p.transport, cfg)
+ PropagateTConfiguration(p.protocol, cfg)
+ p.cfg = cfg
+}
+
+var (
+ _ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
+ _ TConfigurationSetter = (*THeaderProtocol)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
new file mode 100644
index 0000000..f5736df
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
@@ -0,0 +1,810 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bufio"
+ "bytes"
+ "compress/zlib"
+ "context"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+)
+
+// Size in bytes for 32-bit ints.
+const size32 = 4
+
+type headerMeta struct {
+ MagicFlags uint32
+ SequenceID int32
+ HeaderLength uint16
+}
+
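+// headerMetaSize is the wire size of headerMeta: 4 bytes of magic and flags,
+// 4 bytes of sequence ID, and 2 bytes of header length.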
+const headerMetaSize = 10
+
+type clientType int
+
+const (
+ clientUnknown clientType = iota
+ clientHeaders
+ clientFramedBinary
+ clientUnframedBinary
+ clientFramedCompact
+ clientUnframedCompact
+)
+
+// Constants defined in THeader format:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+const (
+ THeaderHeaderMagic uint32 = 0x0fff0000
+ THeaderHeaderMask uint32 = 0xffff0000
+ THeaderFlagsMask uint32 = 0x0000ffff
+ THeaderMaxFrameSize uint32 = 0x3fffffff
+)
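+
+// For illustration (assumed values): if the second 32-bit word of a frame is
+// 0x0fff0001, then word&THeaderHeaderMask == THeaderHeaderMagic identifies a
+// THeader frame, and word&THeaderFlagsMask yields the flags 0x0001.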
+
+// THeaderMap is the type of the header map in THeader transport.
+type THeaderMap map[string]string
+
+// THeaderProtocolID is the wrapped protocol id used in THeader.
+type THeaderProtocolID int32
+
+// Supported THeaderProtocolID values.
+const (
+ THeaderProtocolBinary THeaderProtocolID = 0x00
+ THeaderProtocolCompact THeaderProtocolID = 0x02
+ THeaderProtocolDefault = THeaderProtocolBinary
+)
+
+// Declared globally to avoid repetitive allocations; only used for protocol
+// ID validation, never for actual I/O.
+var globalMemoryBuffer = NewTMemoryBuffer()
+
+// Validate checks whether the THeaderProtocolID is a valid/supported one.
+func (id THeaderProtocolID) Validate() error {
+ _, err := id.GetProtocol(globalMemoryBuffer)
+ return err
+}
+
+// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
+func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
+ switch id {
+ default:
+ return nil, NewTApplicationException(
+ INVALID_PROTOCOL,
+ fmt.Sprintf("THeader protocol id %d not supported", id),
+ )
+ case THeaderProtocolBinary:
+ return NewTBinaryProtocolTransport(trans), nil
+ case THeaderProtocolCompact:
+ return NewTCompactProtocol(trans), nil
+ }
+}
+
+// THeaderTransformID defines the numeric id of the transform used.
+type THeaderTransformID int32
+
+// THeaderTransformID values.
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+ TransformNone THeaderTransformID = iota // 0, no special handling
+ TransformZlib // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+ TransformNone: true,
+ TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+ io.Reader
+
+ closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with expected
+// closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+// &TransformReader{Reader: baseReader}
+//
+// directly is sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+ return &TransformReader{
+ Reader: baseReader,
+ closers: make([]io.Closer, 0, capacity),
+ }
+}
+
+// Close calls the underlying closers in reverse order,
+// stopping at and returning the first error encountered.
+func (tr *TransformReader) Close() error {
+ // Call closers in reversed order
+ for i := len(tr.closers) - 1; i >= 0; i-- {
+ if err := tr.closers[i].Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddTransform adds a transform.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+ switch id {
+ default:
+ return NewTApplicationException(
+ INVALID_TRANSFORM,
+ fmt.Sprintf("THeaderTransformID %d not supported", id),
+ )
+ case TransformNone:
+ // no-op
+ case TransformZlib:
+ readCloser, err := zlib.NewReader(tr.Reader)
+ if err != nil {
+ return err
+ }
+ tr.Reader = readCloser
+ tr.closers = append(tr.closers, readCloser)
+ }
+ return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+ io.Writer
+
+ closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+ writer := &TransformWriter{
+ Writer: baseWriter,
+ closers: make([]io.Closer, 0, len(transforms)),
+ }
+ for _, id := range transforms {
+ if err := writer.AddTransform(id); err != nil {
+ return nil, err
+ }
+ }
+ return writer, nil
+}
+
+// Close calls the underlying closers in reverse order,
+// stopping at and returning the first error encountered.
+func (tw *TransformWriter) Close() error {
+ // Call closers in reversed order
+ for i := len(tw.closers) - 1; i >= 0; i-- {
+ if err := tw.closers[i].Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AddTransform adds a transform.
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
+ switch id {
+ default:
+ return NewTApplicationException(
+ INVALID_TRANSFORM,
+ fmt.Sprintf("THeaderTransformID %d not supported", id),
+ )
+ case TransformNone:
+ // no-op
+ case TransformZlib:
+ writeCloser := zlib.NewWriter(tw.Writer)
+ tw.Writer = writeCloser
+ tw.closers = append(tw.closers, writeCloser)
+ }
+ return nil
+}
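+
+// transformRoundTripExample is an illustrative sketch (not part of the
+// upstream API): data written through a zlib TransformWriter can be read
+// back through a TransformReader with the same transform applied.
+func transformRoundTripExample() error {
+ var wire bytes.Buffer
+ w, err := NewTransformWriter(&wire, []THeaderTransformID{TransformZlib})
+ if err != nil {
+ return err
+ }
+ if _, err := w.Write([]byte("payload")); err != nil {
+ return err
+ }
+ // Closing flushes and terminates the zlib stream.
+ if err := w.Close(); err != nil {
+ return err
+ }
+ r := NewTransformReaderWithCapacity(&wire, 1)
+ if err := r.AddTransform(TransformZlib); err != nil {
+ return err
+ }
+ decoded, err := ioutil.ReadAll(r) // decoded == []byte("payload")
+ if err != nil {
+ return err
+ }
+ _ = decoded
+ return r.Close()
+}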
+
+// THeaderInfoType is the type id of the info headers.
+type THeaderInfoType int32
+
+// Supported THeaderInfoType values.
+const (
+ _ THeaderInfoType = iota // Skip 0
+ InfoKeyValue // 1
+ // Rest of the info types are not supported.
+)
+
+// THeaderTransport is a Transport mode that implements THeader.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or
+// TSSLSocket), not a rich transport like TZlibTransport or TFramedTransport.
+type THeaderTransport struct {
+ SequenceID int32
+ Flags uint32
+
+ transport TTransport
+
+ // THeaderMap for read and write
+ readHeaders THeaderMap
+ writeHeaders THeaderMap
+
+ // Reading related variables.
+ reader *bufio.Reader
+ // When frame is detected, we read the frame fully into frameBuffer.
+ frameBuffer bytes.Buffer
+ // When it's non-nil, Read should read from frameReader instead of
+ // reader, and EOF error indicates end of frame instead of end of all
+ // transport.
+ frameReader io.ReadCloser
+
+ // Writing related variables
+ writeBuffer bytes.Buffer
+ writeTransforms []THeaderTransformID
+
+ clientType clientType
+ protocolID THeaderProtocolID
+ cfg *TConfiguration
+
+ // buffer is used in the following scenarios to avoid repetitive
+ // allocations, while 4 is big enough for all those scenarios:
+ //
+ // * header padding (max size 4)
+ // * write the frame size (size 4)
+ buffer [4]byte
+}
+
+var _ TTransport = (*THeaderTransport)(nil)
+
+// Deprecated: Use NewTHeaderTransportConf instead.
+func NewTHeaderTransport(trans TTransport) *THeaderTransport {
+ return NewTHeaderTransportConf(trans, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderTransportConf creates THeaderTransport from the
+// underlying transport, with given TConfiguration attached.
+//
+// If trans is already a *THeaderTransport, it will be returned as is,
+// but with TConfiguration overridden by the value passed in.
+//
+// The protocol ID in TConfiguration is only useful for client transports.
+// For servers,
+// the protocol ID will be overridden again to the one set by the client,
+// to ensure that servers always speak the same dialect as the client.
+func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
+ if ht, ok := trans.(*THeaderTransport); ok {
+ ht.SetTConfiguration(conf)
+ return ht
+ }
+ PropagateTConfiguration(trans, conf)
+ return &THeaderTransport{
+ transport: trans,
+ reader: bufio.NewReader(trans),
+ writeHeaders: make(THeaderMap),
+ protocolID: conf.GetTHeaderProtocolID(),
+ cfg: conf,
+ }
+}
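+
+// A minimal construction sketch (assumed usage, not from upstream docs):
+//
+// trans := NewTHeaderTransportConf(NewTMemoryBuffer(), &TConfiguration{
+// THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact),
+// })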
+
+// Open calls the underlying transport's Open function.
+func (t *THeaderTransport) Open() error {
+ return t.transport.Open()
+}
+
+// IsOpen calls the underlying transport's IsOpen function.
+func (t *THeaderTransport) IsOpen() bool {
+ return t.transport.IsOpen()
+}
+
+// ReadFrame tries to read the frame header, guess the client type, and handle
+// unframed clients.
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
+ if !t.needReadFrame() {
+ // No need to read frame, skipping.
+ return nil
+ }
+
+ // Peek and handle the first 32 bits.
+ // They could either be the length field of a framed message,
+ // or the first bytes of an unframed message.
+ var buf []byte
+ var err error
+ // This is also usually the first read from a connection,
+ // so handle retries around socket timeouts.
+ _, deadlineSet := ctx.Deadline()
+ for {
+ buf, err = t.reader.Peek(size32)
+ if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+ // This is I/O timeout and we still have time,
+ // continue trying
+ continue
+ }
+ // For anything else, do not retry
+ break
+ }
+ if err != nil {
+ return err
+ }
+
+ frameSize := binary.BigEndian.Uint32(buf)
+ if frameSize&VERSION_MASK == VERSION_1 {
+ t.clientType = clientUnframedBinary
+ return nil
+ }
+ if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+ t.clientType = clientUnframedCompact
+ return nil
+ }
+
+ // At this point it should be a framed message,
+ // sanity check on frameSize then discard the peeked part.
+ if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ errors.New("frame too large"),
+ )
+ }
+ t.reader.Discard(size32)
+
+ // Read the frame fully into frameBuffer.
+ _, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
+ if err != nil {
+ return err
+ }
+ t.frameReader = ioutil.NopCloser(&t.frameBuffer)
+
+ // Peek and handle the next 32 bits.
+ buf = t.frameBuffer.Bytes()[:size32]
+ version := binary.BigEndian.Uint32(buf)
+ if version&THeaderHeaderMask == THeaderHeaderMagic {
+ t.clientType = clientHeaders
+ return t.parseHeaders(ctx, frameSize)
+ }
+ if version&VERSION_MASK == VERSION_1 {
+ t.clientType = clientFramedBinary
+ return nil
+ }
+ if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+ t.clientType = clientFramedCompact
+ return nil
+ }
+ if err := t.endOfFrame(); err != nil {
+ return err
+ }
+ return NewTProtocolExceptionWithType(
+ NOT_IMPLEMENTED,
+ errors.New("unsupported client transport type"),
+ )
+}
+
+// endOfFrame does end of frame handling.
+//
+// It closes frameReader, and also resets frame related states.
+func (t *THeaderTransport) endOfFrame() error {
+ defer func() {
+ t.frameBuffer.Reset()
+ t.frameReader = nil
+ }()
+ return t.frameReader.Close()
+}
+
+func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
+ if t.clientType != clientHeaders {
+ return nil
+ }
+
+ var err error
+ var meta headerMeta
+ if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
+ return err
+ }
+ frameSize -= headerMetaSize
+ t.Flags = meta.MagicFlags & THeaderFlagsMask
+ t.SequenceID = meta.SequenceID
+ headerLength := int64(meta.HeaderLength) * 4
+ if int64(frameSize) < headerLength {
+ return NewTProtocolExceptionWithType(
+ SIZE_LIMIT,
+ errors.New("header size is larger than the whole frame"),
+ )
+ }
+ headerBuf := NewTMemoryBuffer()
+ _, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
+ if err != nil {
+ return err
+ }
+ hp := NewTCompactProtocol(headerBuf)
+ hp.SetTConfiguration(t.cfg)
+
+ // At this point the header is already read into headerBuf,
+ // and t.frameBuffer starts from the actual payload.
+ protoID, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ t.protocolID = THeaderProtocolID(protoID)
+
+ var transformCount int32
+ transformCount, err = hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ if transformCount > 0 {
+ reader := NewTransformReaderWithCapacity(
+ &t.frameBuffer,
+ int(transformCount),
+ )
+ t.frameReader = reader
+ transformIDs := make([]THeaderTransformID, transformCount)
+ for i := 0; i < int(transformCount); i++ {
+ id, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ transformIDs[i] = THeaderTransformID(id)
+ }
+ // The transform IDs on the wire were added in the order of writing,
+ // so on the reading side we need to apply them in reverse order.
+ for i := transformCount - 1; i >= 0; i-- {
+ id := transformIDs[i]
+ if err := reader.AddTransform(id); err != nil {
+ return err
+ }
+ }
+ }
+
+ // The info part does not use the transforms yet, so it's
+ // important to continue using headerBuf.
+ headers := make(THeaderMap)
+ for {
+ infoType, err := hp.readVarint32()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ if THeaderInfoType(infoType) == InfoKeyValue {
+ count, err := hp.readVarint32()
+ if err != nil {
+ return err
+ }
+ for i := 0; i < int(count); i++ {
+ key, err := hp.ReadString(ctx)
+ if err != nil {
+ return err
+ }
+ value, err := hp.ReadString(ctx)
+ if err != nil {
+ return err
+ }
+ headers[key] = value
+ }
+ } else {
+ // Skip reading info section on the first
+ // unsupported info type.
+ break
+ }
+ }
+ t.readHeaders = headers
+
+ return nil
+}
+
+func (t *THeaderTransport) needReadFrame() bool {
+ if t.clientType == clientUnknown {
+ // This is a new connection that's never read before.
+ return true
+ }
+ if t.isFramed() && t.frameReader == nil {
+ // We just finished the last frame.
+ return true
+ }
+ return false
+}
+
+func (t *THeaderTransport) Read(p []byte) (read int, err error) {
+ // Using context.Background here instead of a passed-in context is safe:
+ // there's no way to pass a context into this function, and in the vast
+ // majority of calls the frame has already been read into frameReader.
+ // Calling ReadFrame here mostly guards against callers that forgot to
+ // call ReadFrame before calling Read.
+ err = t.ReadFrame(context.Background())
+ if err != nil {
+ return
+ }
+ if t.frameReader != nil {
+ read, err = t.frameReader.Read(p)
+ if err == nil && t.frameBuffer.Len() <= 0 {
+ // the last Read finished the frame, do endOfFrame
+ // handling here.
+ err = t.endOfFrame()
+ } else if err == io.EOF {
+ err = t.endOfFrame()
+ if err != nil {
+ return
+ }
+ if read == 0 {
+ // Try to read the next frame when we hit EOF
+ // (end of frame) immediately.
+ // When we got here, it means the last read
+ // finished the previous frame, but didn't
+ // do endOfFrame handling yet.
+ // We have to read the next frame here,
+ // as otherwise we would return 0 and nil,
+ // which is a case not handled well by most
+ // protocol implementations.
+ return t.Read(p)
+ }
+ }
+ return
+ }
+ return t.reader.Read(p)
+}
+
+// Write writes data to the write buffer.
+//
+// You need to call Flush to actually write them to the transport.
+func (t *THeaderTransport) Write(p []byte) (int, error) {
+ return t.writeBuffer.Write(p)
+}
+
+// Flush writes the appropriate header and the write buffer to the underlying transport.
+func (t *THeaderTransport) Flush(ctx context.Context) error {
+ if t.writeBuffer.Len() == 0 {
+ return nil
+ }
+
+ defer t.writeBuffer.Reset()
+
+ switch t.clientType {
+ default:
+ fallthrough
+ case clientUnknown:
+ t.clientType = clientHeaders
+ fallthrough
+ case clientHeaders:
+ headers := NewTMemoryBuffer()
+ hp := NewTCompactProtocol(headers)
+ hp.SetTConfiguration(t.cfg)
+ if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ for _, transform := range t.writeTransforms {
+ if _, err := hp.writeVarint32(int32(transform)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+ if len(t.writeHeaders) > 0 {
+ if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ for key, value := range t.writeHeaders {
+ if err := hp.WriteString(ctx, key); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if err := hp.WriteString(ctx, value); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+ }
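+ // THeader transmits the header block length in 32-bit words
+ // (see HeaderLength below), so pad the block with zero bytes to a
+ // multiple of 4.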
+ padding := 4 - headers.Len()%4
+ if padding < 4 {
+ buf := t.buffer[:padding]
+ for i := range buf {
+ buf[i] = 0
+ }
+ if _, err := headers.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+
+ var payload bytes.Buffer
+ meta := headerMeta{
+ MagicFlags: THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
+ SequenceID: t.SequenceID,
+ HeaderLength: uint16(headers.Len() / 4),
+ }
+ if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := io.Copy(&payload, headers); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ writer, err := NewTransformWriter(&payload, t.writeTransforms)
+ if err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ if err := writer.Close(); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ // First write frame length
+ buf := t.buffer[:size32]
+ binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
+ if _, err := t.transport.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ // Then write the payload
+ if _, err := io.Copy(t.transport, &payload); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+
+ case clientFramedBinary, clientFramedCompact:
+ buf := t.buffer[:size32]
+ binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
+ if _, err := t.transport.Write(buf); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ fallthrough
+ case clientUnframedBinary, clientUnframedCompact:
+ if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
+ return NewTTransportExceptionFromError(err)
+ }
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ return NewTTransportExceptionFromError(ctx.Err())
+ }
+
+ return t.transport.Flush(ctx)
+}
+
+// Close closes the transport, along with its underlying transport.
+func (t *THeaderTransport) Close() error {
+ if err := t.Flush(context.Background()); err != nil {
+ return err
+ }
+ return t.transport.Close()
+}
+
+// RemainingBytes calls the underlying transport's RemainingBytes.
+//
+// Even in framed cases, because of all the possible compression transforms
+// involved, the remaining frame size is likely to be different from the actual
+// remaining readable bytes, so we don't bother to keep tracking the remaining
+// frame size by ourselves and just use the underlying transport's
+// RemainingBytes directly.
+func (t *THeaderTransport) RemainingBytes() uint64 {
+ return t.transport.RemainingBytes()
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (t *THeaderTransport) GetReadHeaders() THeaderMap {
+ return t.readHeaders
+}
+
+// SetWriteHeader sets a header for write.
+func (t *THeaderTransport) SetWriteHeader(key, value string) {
+ t.writeHeaders[key] = value
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (t *THeaderTransport) ClearWriteHeaders() {
+ t.writeHeaders = make(THeaderMap)
+}
+
+// AddTransform adds a transform for writing.
+func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
+ if !supportedTransformIDs[transform] {
+ return NewTProtocolExceptionWithType(
+ NOT_IMPLEMENTED,
+ fmt.Errorf("THeaderTransformID %d not supported", transform),
+ )
+ }
+ t.writeTransforms = append(t.writeTransforms, transform)
+ return nil
+}
+
+// Protocol returns the wrapped protocol id used in this THeaderTransport.
+func (t *THeaderTransport) Protocol() THeaderProtocolID {
+ switch t.clientType {
+ default:
+ return t.protocolID
+ case clientFramedBinary, clientUnframedBinary:
+ return THeaderProtocolBinary
+ case clientFramedCompact, clientUnframedCompact:
+ return THeaderProtocolCompact
+ }
+}
+
+func (t *THeaderTransport) isFramed() bool {
+ switch t.clientType {
+ default:
+ return false
+ case clientHeaders, clientFramedBinary, clientFramedCompact:
+ return true
+ }
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(t.transport, cfg)
+ t.cfg = cfg
+}
+
+// THeaderTransportFactory is a TTransportFactory implementation to create
+// THeaderTransport.
+//
+// It also implements TConfigurationSetter.
+type THeaderTransportFactory struct {
+	// The underlying factory, which could be nil.
+ Factory TTransportFactory
+
+ cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderTransportFactoryConf instead.
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
+ return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
+ noPropagation: true,
+ })
+}
+
+// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
+// the given *TConfiguration.
+func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+ return &THeaderTransportFactory{
+ Factory: factory,
+
+ cfg: conf,
+ }
+}
+
+// GetTransport implements TTransportFactory.
+func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+ if f.Factory != nil {
+ t, err := f.Factory.GetTransport(trans)
+ if err != nil {
+ return nil, err
+ }
+ return NewTHeaderTransportConf(t, f.cfg), nil
+ }
+ return NewTHeaderTransportConf(trans, f.cfg), nil
+}
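+
+// A usage sketch, not part of the upstream API surface; "base" stands in for
+// any caller-supplied TTransport:
+//
+//	factory := NewTHeaderTransportFactoryConf(nil, &TConfiguration{})
+//	headerTrans, err := factory.GetTransport(base)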
+
+// SetTConfiguration implements TConfigurationSetter.
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+ PropagateTConfiguration(f.Factory, f.cfg)
+ f.cfg = cfg
+}
+
+var (
+ _ TConfigurationSetter = (*THeaderTransportFactory)(nil)
+ _ TConfigurationSetter = (*THeaderTransport)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
new file mode 100644
index 0000000..50d44ec
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "log"
+ "os"
+)
+
+// Logger is a simple wrapper of a logging function.
+//
+// In reality, users are likely to use a variety of logging libraries, and
+// those libraries are not always compatible with each other.
+//
+// Logger is meant to be a simple common ground into which whatever logging
+// library is in use can easily be wrapped.
+//
+// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
+// discussion behind it.
+type Logger func(msg string)
+
+// NopLogger is a Logger implementation that does nothing.
+func NopLogger(msg string) {}
+
+// StdLogger wraps stdlib log package into a Logger.
+//
+// If the logger passed in is nil, it falls back to stderr with default flags.
+func StdLogger(logger *log.Logger) Logger {
+ if logger == nil {
+ logger = log.New(os.Stderr, "", log.LstdFlags)
+ }
+ return func(msg string) {
+ logger.Print(msg)
+ }
+}
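+
+// A usage sketch of the wrapping described above; the prefix string is an
+// arbitrary example value:
+//
+//	var l Logger = StdLogger(log.New(os.Stderr, "thrift: ", log.LstdFlags))
+//	l("failed to flush transport") // printed through the wrapped *log.Logger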
+
+func fallbackLogger(logger Logger) Logger {
+ if logger == nil {
+ return StdLogger(nil)
+ }
+ return logger
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
new file mode 100644
index 0000000..5936d27
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bytes"
+ "context"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+ *bytes.Buffer
+ size int
+}
+
+type TMemoryBufferTransportFactory struct {
+ size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+ if trans != nil {
+ t, ok := trans.(*TMemoryBuffer)
+ if ok && t.size > 0 {
+ return NewTMemoryBufferLen(t.size), nil
+ }
+ }
+ return NewTMemoryBufferLen(p.size), nil
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+ return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+ return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+ buf := make([]byte, 0, size)
+ return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
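+
+// A usage sketch: TMemoryBuffer embeds *bytes.Buffer, so the usual buffer
+// methods are promoted and available directly:
+//
+//	buf := NewTMemoryBufferLen(64)
+//	buf.WriteString("hello")          // promoted from bytes.Buffer
+//	remaining := buf.RemainingBytes() // 5, the unread byte count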
+
+func (p *TMemoryBuffer) IsOpen() bool {
+ return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+ return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+ p.Buffer.Reset()
+ return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush(ctx context.Context) error {
+ return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+ return uint64(p.Buffer.Len())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
new file mode 100644
index 0000000..25ab2e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+ INVALID_TMESSAGE_TYPE TMessageType = 0
+ CALL TMessageType = 1
+ REPLY TMessageType = 2
+ EXCEPTION TMessageType = 3
+ ONEWAY TMessageType = 4
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
new file mode 100644
index 0000000..e4512d2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "math"
+ "strconv"
+)
+
+type Numeric interface {
+ Int64() int64
+ Int32() int32
+ Int16() int16
+ Byte() byte
+ Int() int
+ Float64() float64
+ Float32() float32
+ String() string
+ isNull() bool
+}
+
+type numeric struct {
+ iValue int64
+ dValue float64
+ sValue string
+ isNil bool
+}
+
+var (
+ INFINITY Numeric
+ NEGATIVE_INFINITY Numeric
+ NAN Numeric
+ ZERO Numeric
+ NUMERIC_NULL Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+ if math.IsInf(dValue, 1) {
+ return INFINITY
+ }
+ if math.IsInf(dValue, -1) {
+ return NEGATIVE_INFINITY
+ }
+ if math.IsNaN(dValue) {
+ return NAN
+ }
+ iValue := int64(dValue)
+ sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(iValue, 10)
+ isNil := false
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+ dValue := float64(iValue)
+ sValue := strconv.FormatInt(int64(iValue), 10)
+ isNil := false
+ return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+ if sValue == INFINITY.String() {
+ return INFINITY
+ }
+ if sValue == NEGATIVE_INFINITY.String() {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == NAN.String() {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ isNil := len(sValue) == 0
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+ if isNull {
+ return NewNullNumeric()
+ }
+ if sValue == JSON_INFINITY {
+ return INFINITY
+ }
+ if sValue == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY
+ }
+ if sValue == JSON_NAN {
+ return NAN
+ }
+ iValue, _ := strconv.ParseInt(sValue, 10, 64)
+ dValue, _ := strconv.ParseFloat(sValue, 64)
+ return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+ return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+ return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+ return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+ return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+ return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+ return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+ return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+ return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+ return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+ return p.isNil
+}
+
+func init() {
+ INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+ NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+ NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+ ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+ NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
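+
+// A usage sketch showing how the constructors keep the integer, float, and
+// string views of a value consistent:
+//
+//	n := NewNumericFromDouble(2.5)
+//	n.Int64()  // 2 (truncated toward zero)
+//	n.String() // "2.5"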
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
new file mode 100644
index 0000000..245a3cc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+ Process(ctx context.Context, in, out TProtocol) (bool, TException)
+
+ // ProcessorMap returns a map of thrift method names to TProcessorFunctions.
+ ProcessorMap() map[string]TProcessorFunction
+
+ // AddToProcessorMap adds the given TProcessorFunction to the internal
+ // processor map at the given key.
+ //
+ // If one is already set at the given key, it will be replaced with the new
+ // TProcessorFunction.
+ AddToProcessorMap(string, TProcessorFunction)
+}
+
+type TProcessorFunction interface {
+ Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+ GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+ processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+ return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+ return p.processor
+}
+
+// The default processor function factory just returns a singleton
+// instance.
+type TProcessorFunctionFactory interface {
+ GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+ processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+ return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+ return p.processor
+}
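+
+// A usage sketch; "myProcessor" stands in for any TProcessor, such as one
+// generated by the thrift compiler:
+//
+//	factory := NewTProcessorFactory(myProcessor)
+//	p := factory.GetProcessor(trans) // always returns the same singleton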
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
new file mode 100644
index 0000000..0a69bd4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "errors"
+ "fmt"
+)
+
+const (
+ VERSION_MASK = 0xffff0000
+ VERSION_1 = 0x80010000
+)
+
+type TProtocol interface {
+ WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
+ WriteMessageEnd(ctx context.Context) error
+ WriteStructBegin(ctx context.Context, name string) error
+ WriteStructEnd(ctx context.Context) error
+ WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
+ WriteFieldEnd(ctx context.Context) error
+ WriteFieldStop(ctx context.Context) error
+ WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
+ WriteMapEnd(ctx context.Context) error
+ WriteListBegin(ctx context.Context, elemType TType, size int) error
+ WriteListEnd(ctx context.Context) error
+ WriteSetBegin(ctx context.Context, elemType TType, size int) error
+ WriteSetEnd(ctx context.Context) error
+ WriteBool(ctx context.Context, value bool) error
+ WriteByte(ctx context.Context, value int8) error
+ WriteI16(ctx context.Context, value int16) error
+ WriteI32(ctx context.Context, value int32) error
+ WriteI64(ctx context.Context, value int64) error
+ WriteDouble(ctx context.Context, value float64) error
+ WriteString(ctx context.Context, value string) error
+ WriteBinary(ctx context.Context, value []byte) error
+
+ ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
+ ReadMessageEnd(ctx context.Context) error
+ ReadStructBegin(ctx context.Context) (name string, err error)
+ ReadStructEnd(ctx context.Context) error
+ ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
+ ReadFieldEnd(ctx context.Context) error
+ ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
+ ReadMapEnd(ctx context.Context) error
+ ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
+ ReadListEnd(ctx context.Context) error
+ ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
+ ReadSetEnd(ctx context.Context) error
+ ReadBool(ctx context.Context) (value bool, err error)
+ ReadByte(ctx context.Context) (value int8, err error)
+ ReadI16(ctx context.Context) (value int16, err error)
+ ReadI32(ctx context.Context) (value int32, err error)
+ ReadI64(ctx context.Context) (value int64, err error)
+ ReadDouble(ctx context.Context) (value float64, err error)
+ ReadString(ctx context.Context) (value string, err error)
+ ReadBinary(ctx context.Context) (value []byte, err error)
+
+ Skip(ctx context.Context, fieldType TType) (err error)
+ Flush(ctx context.Context) (err error)
+
+ Transport() TTransport
+}
+
+// The maximum recursion depth the Skip function will traverse.
+const DEFAULT_RECURSION_DEPTH = 64
+
+// Skips over the next data element from the provided input TProtocol object.
+func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
+ return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
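+
+// A usage sketch: discarding a field that the decoder does not recognize,
+// assuming prot is some TProtocol midway through reading a struct:
+//
+//	_, typeId, _, err := prot.ReadFieldBegin(ctx)
+//	if err == nil {
+//		err = SkipDefaultDepth(ctx, prot, typeId) // skip the unknown field
+//	}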
+
+// Skips over the next data element from the provided input TProtocol object.
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+ if maxDepth <= 0 {
+ return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+ }
+
+ switch fieldType {
+ case BOOL:
+ _, err = self.ReadBool(ctx)
+ return
+ case BYTE:
+ _, err = self.ReadByte(ctx)
+ return
+ case I16:
+ _, err = self.ReadI16(ctx)
+ return
+ case I32:
+ _, err = self.ReadI32(ctx)
+ return
+ case I64:
+ _, err = self.ReadI64(ctx)
+ return
+ case DOUBLE:
+ _, err = self.ReadDouble(ctx)
+ return
+ case STRING:
+ _, err = self.ReadString(ctx)
+ return
+ case STRUCT:
+ if _, err = self.ReadStructBegin(ctx); err != nil {
+ return err
+ }
+ for {
+			_, typeId, _, err := self.ReadFieldBegin(ctx)
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(ctx, self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			if err := self.ReadFieldEnd(ctx); err != nil {
+				return err
+			}
+ }
+ return self.ReadStructEnd(ctx)
+ case MAP:
+ keyType, valueType, size, err := self.ReadMapBegin(ctx)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(ctx, self, keyType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+			if err := Skip(ctx, self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+ }
+ return self.ReadMapEnd(ctx)
+ case SET:
+ elemType, size, err := self.ReadSetBegin(ctx)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(ctx, self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadSetEnd(ctx)
+ case LIST:
+ elemType, size, err := self.ReadListBegin(ctx)
+ if err != nil {
+ return err
+ }
+ for i := 0; i < size; i++ {
+ err := Skip(ctx, self, elemType, maxDepth-1)
+ if err != nil {
+ return err
+ }
+ }
+ return self.ReadListEnd(ctx)
+ default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
+ }
+ return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
new file mode 100644
index 0000000..9dcf4bf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "encoding/base64"
+ "errors"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+ TException
+ TypeId() int
+}
+
+const (
+ UNKNOWN_PROTOCOL_EXCEPTION = 0
+ INVALID_DATA = 1
+ NEGATIVE_SIZE = 2
+ SIZE_LIMIT = 3
+ BAD_VERSION = 4
+ NOT_IMPLEMENTED = 5
+ DEPTH_LIMIT = 6
+)
+
+type tProtocolException struct {
+ typeId int
+ err error
+ msg string
+}
+
+var _ TProtocolException = (*tProtocolException)(nil)
+
+func (tProtocolException) TExceptionType() TExceptionType {
+ return TExceptionTypeProtocol
+}
+
+func (p *tProtocolException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+ return p.msg
+}
+
+func (p *tProtocolException) Error() string {
+ return p.msg
+}
+
+func (p *tProtocolException) Unwrap() error {
+ return p.err
+}
+
+func NewTProtocolException(err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+
+ if e, ok := err.(TProtocolException); ok {
+ return e
+ }
+
+ if errors.As(err, new(base64.CorruptInputError)) {
+ return NewTProtocolExceptionWithType(INVALID_DATA, err)
+ }
+
+ return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+ if err == nil {
+ return nil
+ }
+ return &tProtocolException{
+ typeId: errType,
+ err: err,
+ msg: err.Error(),
+ }
+}
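+
+// A usage sketch:
+//
+//	err := NewTProtocolExceptionWithType(INVALID_DATA, errors.New("bad frame"))
+//	err.TypeId() // INVALID_DATA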
+
+func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
+ return &tProtocolException{
+ typeId: err.TypeId(),
+ err: err,
+ msg: prepend + err.Error(),
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
new file mode 100644
index 0000000..c40f796
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+ GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
new file mode 100644
index 0000000..d884c6a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+)
+
+// See https://godoc.org/context#WithValue for why we need the unexported typedefs.
+type responseHelperKey struct{}
+
+// TResponseHelper defines an object with a set of helper functions that can be
+// retrieved from the context object passed into server handler functions.
+//
+// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
+// from the context object.
+//
+// The zero value of TResponseHelper is valid, with all helper functions being
+// no-ops.
+type TResponseHelper struct {
+ // THeader related functions
+ *THeaderResponseHelper
+}
+
+// THeaderResponseHelper defines THeader related TResponseHelper functions.
+//
+// The zero value of *THeaderResponseHelper is valid, with all helper functions
+// being no-ops.
+type THeaderResponseHelper struct {
+ proto *THeaderProtocol
+}
+
+// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
+// underlying TProtocol.
+func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
+ if hp, ok := proto.(*THeaderProtocol); ok {
+ return &THeaderResponseHelper{
+ proto: hp,
+ }
+ }
+ return nil
+}
+
+// SetHeader sets a response header.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) SetHeader(key, value string) {
+ if h != nil && h.proto != nil {
+ h.proto.SetWriteHeader(key, value)
+ }
+}
+
+// ClearHeaders clears all the response headers previously set.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) ClearHeaders() {
+ if h != nil && h.proto != nil {
+ h.proto.ClearWriteHeaders()
+ }
+}
+
+// GetResponseHelper retrieves the TResponseHelper implementation injected into
+// the context object.
+//
+// If no helper was found in the context object, a no-op helper with ok == false
+// will be returned.
+func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
+ if v := ctx.Value(responseHelperKey{}); v != nil {
+ helper, ok = v.(TResponseHelper)
+ }
+ return
+}
+
+// SetResponseHelper injects TResponseHelper into the context object.
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
+ return context.WithValue(ctx, responseHelperKey{}, helper)
+}
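+
+// A usage sketch inside a server handler; "traceID" stands in for a
+// caller-supplied value:
+//
+//	helper, ok := GetResponseHelper(ctx)
+//	if ok {
+//		helper.SetHeader("trace-id", traceID) // no-op on non-THeader transports
+//	}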
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
new file mode 100644
index 0000000..83fdf29
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+type RichTransport struct {
+ TTransport
+}
+
+// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
+func NewTRichTransport(trans TTransport) *RichTransport {
+ return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+ return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+ return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+ return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
+ return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+ v := [1]byte{0}
+ n, err := r.Read(v[0:1])
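+	// A read that returns a byte alongside io.EOF is still successful here;
+	// the EOF will surface on the next read.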
+ if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
+ return v[0], nil
+ }
+ if n > 0 && err != nil {
+ return v[0], err
+ }
+ if err != nil {
+ return 0, err
+ }
+ return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+ v := [1]byte{c}
+ _, err := w.Write(v[0:1])
+ return err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
new file mode 100644
index 0000000..c449790
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "sync"
+)
+
+type TSerializer struct {
+ Transport *TMemoryBuffer
+ Protocol TProtocol
+}
+
+type TStruct interface {
+ Write(ctx context.Context, p TProtocol) error
+ Read(ctx context.Context, p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+ transport := NewTMemoryBufferLen(1024)
+ protocol := NewTBinaryProtocolTransport(transport)
+
+ return &TSerializer{
+ Transport: transport,
+ Protocol: protocol,
+ }
+}
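+
+// A usage sketch; "myStruct" stands in for any value implementing TStruct:
+//
+//	s := NewTSerializer()
+//	b, err := s.Write(context.Background(), myStruct) // binary-encoded bytes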
+
+func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(ctx, t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(ctx); err != nil {
+ return
+ }
+ if err = t.Transport.Flush(ctx); err != nil {
+ return
+ }
+
+ return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
+ t.Transport.Reset()
+
+ if err = msg.Write(ctx, t.Protocol); err != nil {
+ return
+ }
+
+ if err = t.Protocol.Flush(ctx); err != nil {
+ return
+ }
+
+ if err = t.Transport.Flush(ctx); err != nil {
+ return
+ }
+
+ b = append(b, t.Transport.Bytes()...)
+ return
+}
+
+// TSerializerPool is the thread-safe version of TSerializer; it uses a
+// resource pool of TSerializer under the hood.
+//
+// It must be initialized with either NewTSerializerPool or
+// NewTSerializerPoolSizeFactory.
+type TSerializerPool struct {
+ pool sync.Pool
+}
+
+// NewTSerializerPool creates a new TSerializerPool.
+//
+// NewTSerializer can be used as the arg here.
+func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
+ return &TSerializerPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ return f()
+ },
+ },
+ }
+}
+
+// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
+// size and protocol factory.
+//
+// Note that the size is not the limit. The TMemoryBuffer underneath can grow
+// larger than that. It just dictates the initial size.
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
+ return &TSerializerPool{
+ pool: sync.Pool{
+ New: func() interface{} {
+ transport := NewTMemoryBufferLen(size)
+ protocol := factory.GetProtocol(transport)
+
+ return &TSerializer{
+ Transport: transport,
+ Protocol: protocol,
+ }
+ },
+ },
+ }
+}
+
+func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
+ s := t.pool.Get().(*TSerializer)
+ defer t.pool.Put(s)
+ return s.WriteString(ctx, msg)
+}
+
+func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
+ s := t.pool.Get().(*TSerializer)
+ defer t.pool.Put(s)
+ return s.Write(ctx, msg)
+}
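+
+// A usage sketch; unlike a bare TSerializer, the pool may be shared freely
+// across goroutines ("myStruct" stands in for any TStruct):
+//
+//	pool := NewTSerializerPoolSizeFactory(1024, NewTSimpleJSONProtocolFactory())
+//	b, err := pool.Write(ctx, myStruct)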
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
new file mode 100644
index 0000000..51c40b6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Server transport. Object which provides client transports.
+type TServerTransport interface {
+ Listen() error
+ Accept() (TTransport, error)
+ Close() error
+
+ // Optional method implementation. This signals to the server transport
+ // that it should break out of any accept() or listen() that it is currently
+ // blocked on. This method, if implemented, MUST be thread safe, as it may
+ // be called from a different thread context than the other TServerTransport
+ // methods.
+ Interrupt() error
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
new file mode 100644
index 0000000..d1a8154
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -0,0 +1,1373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+)
+
+type _ParseContext int
+
+const (
+ _CONTEXT_INVALID _ParseContext = iota
+ _CONTEXT_IN_TOPLEVEL // 1
+ _CONTEXT_IN_LIST_FIRST // 2
+ _CONTEXT_IN_LIST // 3
+ _CONTEXT_IN_OBJECT_FIRST // 4
+ _CONTEXT_IN_OBJECT_NEXT_KEY // 5
+ _CONTEXT_IN_OBJECT_NEXT_VALUE // 6
+)
+
+func (p _ParseContext) String() string {
+ switch p {
+ case _CONTEXT_IN_TOPLEVEL:
+ return "TOPLEVEL"
+ case _CONTEXT_IN_LIST_FIRST:
+ return "LIST-FIRST"
+ case _CONTEXT_IN_LIST:
+ return "LIST"
+ case _CONTEXT_IN_OBJECT_FIRST:
+ return "OBJECT-FIRST"
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ return "OBJECT-NEXT-KEY"
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ return "OBJECT-NEXT-VALUE"
+ }
+ return "UNKNOWN-PARSE-CONTEXT"
+}
+
+type jsonContextStack []_ParseContext
+
+func (s *jsonContextStack) push(v _ParseContext) {
+ *s = append(*s, v)
+}
+
+func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
+ l := len(s)
+ if l <= 0 {
+ return
+ }
+ return s[l-1], true
+}
+
+func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
+ l := len(*s)
+ if l <= 0 {
+ return
+ }
+ v = (*s)[l-1]
+ *s = (*s)[0 : l-1]
+ return v, true
+}
+
+var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
+
+// Simple JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages. It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+ trans TTransport
+
+ parseContextStack jsonContextStack
+ dumpContext jsonContextStack
+
+ writer *bufio.Writer
+ reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+ v := &TSimpleJSONProtocol{trans: t,
+ writer: bufio.NewWriter(t),
+ reader: bufio.NewReader(t),
+ }
+ v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+ v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
+ return v
+}
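+
+// A usage sketch: writing a bare value and reading the produced JSON text
+// back out of a memory buffer:
+//
+//	buf := NewTMemoryBuffer()
+//	p := NewTSimpleJSONProtocol(buf)
+//	_ = p.WriteBool(ctx, true)
+//	_ = p.Flush(ctx)
+//	buf.String() // "true"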
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+ return &TSimpleJSONProtocolFactory{}
+}
+
+var (
+ JSON_COMMA []byte
+ JSON_COLON []byte
+ JSON_LBRACE []byte
+ JSON_RBRACE []byte
+ JSON_LBRACKET []byte
+ JSON_RBRACKET []byte
+ JSON_QUOTE byte
+ JSON_QUOTE_BYTES []byte
+ JSON_NULL []byte
+ JSON_TRUE []byte
+ JSON_FALSE []byte
+ JSON_INFINITY string
+ JSON_NEGATIVE_INFINITY string
+ JSON_NAN string
+ JSON_INFINITY_BYTES []byte
+ JSON_NEGATIVE_INFINITY_BYTES []byte
+ JSON_NAN_BYTES []byte
+ json_nonbase_map_elem_bytes []byte
+)
+
+func init() {
+ JSON_COMMA = []byte{','}
+ JSON_COLON = []byte{':'}
+ JSON_LBRACE = []byte{'{'}
+ JSON_RBRACE = []byte{'}'}
+ JSON_LBRACKET = []byte{'['}
+ JSON_RBRACKET = []byte{']'}
+ JSON_QUOTE = '"'
+ JSON_QUOTE_BYTES = []byte{'"'}
+ JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+ JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+ JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+ JSON_INFINITY = "Infinity"
+ JSON_NEGATIVE_INFINITY = "-Infinity"
+ JSON_NAN = "NaN"
+ JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+ JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+ json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+ b, _ := json.Marshal(s)
+ s1 := string(b)
+ return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+ s1 := new(string)
+ err := json.Unmarshal([]byte(s), s1)
+ return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+ return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+ p.resetContextStack() // THRIFT-3735
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteString(ctx, name); e != nil {
+ return e
+ }
+ if e := p.WriteByte(ctx, int8(typeId)); e != nil {
+ return e
+ }
+ if e := p.WriteI32(ctx, seqId); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
+ if e := p.OutputObjectBegin(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
+ return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+ if e := p.WriteString(ctx, name); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.WriteByte(ctx, int8(keyType)); e != nil {
+ return e
+ }
+ if e := p.WriteByte(ctx, int8(valueType)); e != nil {
+ return e
+ }
+ return p.WriteI32(ctx, int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+ return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
+ return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
+ return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+ return p.WriteI32(ctx, int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+ return p.WriteI32(ctx, int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
+ return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
+ return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
+ return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
+	// The JSON library only takes in a string, not an arbitrary byte array.
+	// To transmit bytes efficiently we must convert them into a valid JSON
+	// string, so we use base64 encoding to avoid excessive escaping/quoting.
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+ if _, e := writer.Write(v); e != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ return NewTProtocolException(e)
+ }
+ if e := writer.Close(); e != nil {
+ return NewTProtocolException(e)
+ }
+ if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+ p.resetContextStack() // THRIFT-3735
+ if isNull, err := p.ParseListBegin(); isNull || err != nil {
+ return name, typeId, seqId, err
+ }
+ if name, err = p.ReadString(ctx); err != nil {
+ return name, typeId, seqId, err
+ }
+ bTypeId, err := p.ReadByte(ctx)
+ typeId = TMessageType(bTypeId)
+ if err != nil {
+ return name, typeId, seqId, err
+ }
+ if seqId, err = p.ReadI32(ctx); err != nil {
+ return name, typeId, seqId, err
+ }
+ return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+ _, err = p.ParseObjectStart()
+ return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
+ return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return "", STOP, 0, err
+ }
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return "", STOP, 0, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ name, err := p.ParseStringBody()
+ // simplejson is not meant to be read back into thrift
+ // - see http://wiki.apache.org/thrift/ThriftUsageJava
+ // - use JSON instead
+ if err != nil {
+ return name, STOP, 0, err
+ }
+ return name, STOP, -1, p.ParsePostValue()
+ }
+ e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+ return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, VOID, 0, e
+ }
+
+ // read keyType
+ bKeyType, e := p.ReadByte(ctx)
+ keyType = TType(bKeyType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read valueType
+ bValueType, e := p.ReadByte(ctx)
+ valueType = TType(bValueType)
+ if e != nil {
+ return keyType, valueType, size, e
+ }
+
+ // read size
+ iSize, err := p.ReadI64(ctx)
+ size = int(iSize)
+ return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
+ return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
+ return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
+ var value bool
+
+ if err := p.ParsePreValue(); err != nil {
+ return value, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 {
+ switch f[0] {
+ case JSON_TRUE[0]:
+ b := make([]byte, len(JSON_TRUE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_TRUE) {
+ value = true
+ } else {
+ e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_FALSE[0]:
+ b := make([]byte, len(JSON_FALSE))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_FALSE) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_NULL[0]:
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return false, NewTProtocolException(err)
+ }
+ if string(b) == string(JSON_NULL) {
+ value = false
+ } else {
+ e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ default:
+ e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+ return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+ v, err := p.ReadI64(ctx)
+ return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+ v, err := p.ReadI64(ctx)
+ return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+ v, err := p.ReadI64(ctx)
+ return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
+ v, _, err := p.ParseI64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
+ v, _, err := p.ParseF64()
+ return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
+ var v string
+ if err := p.ParsePreValue(); err != nil {
+ return v, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseStringBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+ var v []byte
+ if err := p.ParsePreValue(); err != nil {
+ return nil, err
+ }
+ f, _ := p.reader.Peek(1)
+ if len(f) > 0 && f[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ value, err := p.ParseBase64EncodedBody()
+ v = value
+ if err != nil {
+ return v, err
+ }
+ } else if len(f) > 0 && f[0] == JSON_NULL[0] {
+ b := make([]byte, len(JSON_NULL))
+ _, err := p.reader.Read(b)
+ if err != nil {
+ return v, NewTProtocolException(err)
+ }
+ if string(b) != string(JSON_NULL) {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ } else {
+ e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+ return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+
+ return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
+ return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+ return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+ return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ switch cxt {
+ case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if _, e := p.write(JSON_COMMA); e != nil {
+ return NewTProtocolException(e)
+ }
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if _, e := p.write(JSON_COLON); e != nil {
+ return NewTProtocolException(e)
+ }
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_LIST)
+ case _CONTEXT_IN_OBJECT_FIRST:
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.dumpContext.pop()
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if value {
+ v = string(JSON_TRUE)
+ } else {
+ v = string(JSON_FALSE)
+ }
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ switch cxt {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_NULL); e != nil {
+ return NewTProtocolException(e)
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ var v string
+ if math.IsNaN(value) {
+ v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+ } else if math.IsInf(value, 1) {
+ v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+ } else if math.IsInf(value, -1) {
+ v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+ } else {
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ v = strconv.FormatFloat(value, 'g', -1, 64)
+ switch cxt {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+ }
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ cxt, ok := p.dumpContext.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ v := strconv.FormatInt(value, 10)
+ switch cxt {
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ v = jsonQuote(v)
+ }
+ if e := p.OutputStringData(v); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if e := p.OutputStringData(jsonQuote(s)); e != nil {
+ return e
+ }
+ return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+ _, e := p.write([]byte(s))
+ return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+ if _, e := p.write(JSON_RBRACE); e != nil {
+ return NewTProtocolException(e)
+ }
+ _, ok := p.dumpContext.pop()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+ if e := p.OutputPreValue(); e != nil {
+ return e
+ }
+ if _, e := p.write(JSON_LBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+ if _, e := p.write(JSON_RBRACKET); e != nil {
+ return NewTProtocolException(e)
+ }
+ _, ok := p.dumpContext.pop()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ if e := p.OutputPostValue(); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+ if e := p.OutputListBegin(); e != nil {
+ return e
+ }
+ if e := p.OutputI64(int64(elemType)); e != nil {
+ return e
+ }
+ if e := p.OutputI64(int64(size)); e != nil {
+ return e
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt, ok := p.parseContextStack.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ b, _ := p.reader.Peek(1)
+ switch cxt {
+ case _CONTEXT_IN_LIST:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACKET[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ case _CONTEXT_IN_OBJECT_NEXT_KEY:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_RBRACE[0]:
+ return nil
+ case JSON_COMMA[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ if len(b) > 0 {
+ switch b[0] {
+ case JSON_COLON[0]:
+ p.reader.ReadByte()
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ return nil
+ default:
+ e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+ if e := p.readNonSignificantWhitespace(); e != nil {
+ return NewTProtocolException(e)
+ }
+ cxt, ok := p.parseContextStack.peek()
+ if !ok {
+ return errEmptyJSONContextStack
+ }
+ switch cxt {
+ case _CONTEXT_IN_LIST_FIRST:
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_LIST)
+ case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+ case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+ p.parseContextStack.pop()
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+ for {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return nil
+ }
+ switch b[0] {
+ case ' ', '\r', '\n', '\t':
+ p.reader.ReadByte()
+ continue
+ default:
+ break
+ }
+ break
+ }
+ return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // count number of escapes to see if we need to keep going
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+ if !ok {
+ // err is nil here, so NewTProtocolException(err) would return nil and
+ // silently swallow the failure; report an explicit INVALID_DATA error.
+ e := fmt.Errorf("Unable to parse as JSON string %s", string(JSON_QUOTE)+line)
+ return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ str := string(JSON_QUOTE) + line + s
+ v, ok := jsonUnquote(str)
+ if !ok {
+ e := fmt.Errorf("Unable to parse as JSON string %s", str)
+ return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+ line, err := p.reader.ReadString(JSON_QUOTE)
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ l := len(line)
+ // count number of escapes to see if we need to keep going
+ i := 1
+ for ; i < l; i++ {
+ if line[l-i-1] != '\\' {
+ break
+ }
+ }
+ if i&0x01 == 1 {
+ return line, nil
+ }
+ s, err := p.ParseQuotedStringBody()
+ if err != nil {
+ return "", NewTProtocolException(err)
+ }
+ v := line + s
+ return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+ line, err := p.reader.ReadBytes(JSON_QUOTE)
+ if err != nil {
+ return line, NewTProtocolException(err)
+ }
+ line2 := line[0 : len(line)-1]
+ l := len(line2)
+ if (l % 4) != 0 {
+ pad := 4 - (l % 4)
+ fill := [...]byte{'=', '=', '='}
+ line2 = append(line2, fill[:pad]...)
+ l = len(line2)
+ }
+ output := make([]byte, base64.StdEncoding.DecodedLen(l))
+ n, err := base64.StdEncoding.Decode(output, line2)
+ return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value int64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Int64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return 0, false, err
+ }
+ var value float64
+ var isnull bool
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ isnull = true
+ } else {
+ num, err := p.readNumeric()
+ isnull = (num == nil)
+ if !isnull {
+ value = num.Float64()
+ }
+ if err != nil {
+ return value, isnull, err
+ }
+ }
+ return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+ if err := p.ParsePreValue(); err != nil {
+ return false, err
+ }
+ var b []byte
+ b, err := p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+ p.reader.ReadByte()
+ p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
+ return false, nil
+ } else if p.safePeekContains(JSON_NULL) {
+ return true, nil
+ }
+ e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+ return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt, _ := p.parseContextStack.peek()
+ if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+ e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACE[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', '}':
+ break
+ }
+ }
+ p.parseContextStack.pop()
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+ if e := p.ParsePreValue(); e != nil {
+ return false, e
+ }
+ var b []byte
+ b, err = p.reader.Peek(1)
+ if err != nil {
+ return false, err
+ }
+ if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+ p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
+ p.reader.ReadByte()
+ isNull = false
+ } else if p.safePeekContains(JSON_NULL) {
+ isNull = true
+ } else {
+ err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+ }
+ return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+ if isNull, e := p.ParseListBegin(); isNull || e != nil {
+ return VOID, 0, e
+ }
+ bElemType, _, err := p.ParseI64()
+ elemType = TType(bElemType)
+ if err != nil {
+ return elemType, size, err
+ }
+ nSize, _, err2 := p.ParseI64()
+ size = int(nSize)
+ return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+ if isNull, err := p.readIfNull(); isNull || err != nil {
+ return err
+ }
+ cxt, _ := p.parseContextStack.peek()
+ if cxt != _CONTEXT_IN_LIST {
+ e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ line, err := p.reader.ReadString(JSON_RBRACKET[0])
+ if err != nil {
+ return NewTProtocolException(err)
+ }
+ for _, char := range line {
+ switch char {
+ default:
+ e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
+ return NewTProtocolExceptionWithType(INVALID_DATA, e)
+ case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+ break
+ }
+ }
+ p.parseContextStack.pop()
+ if cxt, ok := p.parseContextStack.peek(); !ok {
+ return errEmptyJSONContextStack
+ } else if cxt == _CONTEXT_IN_TOPLEVEL {
+ return nil
+ }
+ return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+ e := p.readNonSignificantWhitespace()
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ b, e := p.reader.Peek(1)
+ if len(b) > 0 {
+ c := b[0]
+ switch c {
+ case JSON_NULL[0]:
+ buf := make([]byte, len(JSON_NULL))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return nil, VOID, NewTProtocolException(e)
+ }
+ if string(JSON_NULL) != string(buf) {
+ e = mismatch(string(JSON_NULL), string(buf))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return nil, VOID, nil
+ case JSON_QUOTE:
+ p.reader.ReadByte()
+ v, e := p.ParseStringBody()
+ if e != nil {
+ return v, UTF8, NewTProtocolException(e)
+ }
+ if v == JSON_INFINITY {
+ return INFINITY, DOUBLE, nil
+ } else if v == JSON_NEGATIVE_INFINITY {
+ return NEGATIVE_INFINITY, DOUBLE, nil
+ } else if v == JSON_NAN {
+ return NAN, DOUBLE, nil
+ }
+ return v, UTF8, nil
+ case JSON_TRUE[0]:
+ buf := make([]byte, len(JSON_TRUE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return true, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_TRUE) != string(buf) {
+ e := mismatch(string(JSON_TRUE), string(buf))
+ return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return true, BOOL, nil
+ case JSON_FALSE[0]:
+ buf := make([]byte, len(JSON_FALSE))
+ _, e := p.reader.Read(buf)
+ if e != nil {
+ return false, BOOL, NewTProtocolException(e)
+ }
+ if string(JSON_FALSE) != string(buf) {
+ e := mismatch(string(JSON_FALSE), string(buf))
+ return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return false, BOOL, nil
+ case JSON_LBRACKET[0]:
+ _, e := p.reader.ReadByte()
+ return make([]interface{}, 0), LIST, NewTProtocolException(e)
+ case JSON_LBRACE[0]:
+ _, e := p.reader.ReadByte()
+ return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+ // assume numeric
+ v, e := p.readNumeric()
+ return v, DOUBLE, e
+ default:
+ e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+ return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+ cont := true
+ for cont {
+ b, _ := p.reader.Peek(1)
+ if len(b) < 1 {
+ return false, nil
+ }
+ switch b[0] {
+ default:
+ return false, nil
+ case JSON_NULL[0]:
+ cont = false
+ break
+ case ' ', '\n', '\r', '\t':
+ p.reader.ReadByte()
+ break
+ }
+ }
+ if p.safePeekContains(JSON_NULL) {
+ p.reader.Read(make([]byte, len(JSON_NULL)))
+ return true, nil
+ }
+ return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+ b, _ := p.reader.Peek(1)
+ if len(b) > 0 && b[0] == JSON_QUOTE {
+ p.reader.ReadByte()
+ }
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+ isNull, err := p.readIfNull()
+ if isNull || err != nil {
+ return NUMERIC_NULL, err
+ }
+ hasDecimalPoint := false
+ nextCanBeSign := true
+ hasE := false
+ MAX_LEN := 40
+ buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+ continueFor := true
+ inQuotes := false
+ for continueFor {
+ c, err := p.reader.ReadByte()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return NUMERIC_NULL, NewTProtocolException(err)
+ }
+ switch c {
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case '.':
+ if hasDecimalPoint {
+ e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasDecimalPoint, nextCanBeSign = true, false
+ case 'e', 'E':
+ if hasE {
+ e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ hasE, nextCanBeSign = true, true
+ case '-', '+':
+ if !nextCanBeSign {
+ e := fmt.Errorf("Negative sign within number")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ buf.WriteByte(c)
+ nextCanBeSign = false
+ case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+ p.reader.UnreadByte()
+ continueFor = false
+ case JSON_NAN[0]:
+ if buf.Len() == 0 {
+ buffer := make([]byte, len(JSON_NAN))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NAN != string(buffer) {
+ e := mismatch(JSON_NAN, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NAN, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_INFINITY[0]:
+ if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+ buffer := make([]byte, len(JSON_INFINITY))
+ buffer[0] = c
+ _, e := p.reader.Read(buffer[1:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_INFINITY != string(buffer) {
+ e := mismatch(JSON_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return INFINITY, nil
+ } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+ buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+ buffer[0] = JSON_NEGATIVE_INFINITY[0]
+ buffer[1] = c
+ _, e := p.reader.Read(buffer[2:])
+ if e != nil {
+ return NUMERIC_NULL, NewTProtocolException(e)
+ }
+ if JSON_NEGATIVE_INFINITY != string(buffer) {
+ e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ if inQuotes {
+ p.readQuoteIfNext()
+ }
+ return NEGATIVE_INFINITY, nil
+ } else {
+ e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ case JSON_QUOTE:
+ if !inQuotes {
+ inQuotes = true
+ } else {
+ break
+ }
+ default:
+ e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ }
+ if buf.Len() == 0 {
+ e := fmt.Errorf("Unable to parse number from empty string ''")
+ return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+ }
+ return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+ for i := 0; i < len(b); i++ {
+ a, _ := p.reader.Peek(i + 1)
+ if len(a) < (i+1) || a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+ p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+ p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+ n, err := p.writer.Write(b)
+ if err != nil {
+ p.writer.Reset(p.trans) // THRIFT-3735
+ }
+ return n, err
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
+ PropagateTConfiguration(p.trans, conf)
+}
+
+var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
new file mode 100644
index 0000000..563cbfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+// ErrAbandonRequest is a special error server handler implementations can
+// return to indicate that the request has been abandoned.
+//
+// TSimpleServer will check for this error, and close the client connection
+// instead of writing the response/error back to the client.
+//
+// It shall only be used when the server handler implementation knows that the
+// client has already abandoned the request (by checking that the passed-in
+// context is already canceled, for example).
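+//
+// A minimal handler sketch (illustrative; MyHandler and its DoWork method are
+// hypothetical, not part of this package):
+//
+//	func (h *MyHandler) DoWork(ctx context.Context) error {
+//		if ctx.Err() != nil {
+//			// The client is gone; make the server close the connection.
+//			return thrift.ErrAbandonRequest
+//		}
+//		return nil
+//	}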
+var ErrAbandonRequest = errors.New("request abandoned")
+
+// ServerConnectivityCheckInterval defines the ticker interval used by the
+// connectivity check in thrift-compiled TProcessorFunc implementations.
+//
+// It's defined as a variable instead of a constant, so that thrift server
+// implementations can change its value to control the behavior.
+//
+// If it's changed to <=0, the feature will be disabled.
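+//
+// For example, to disable the feature entirely (illustrative):
+//
+//	ServerConnectivityCheckInterval = 0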
+var ServerConnectivityCheckInterval = time.Millisecond * 5
+
+/*
+ * This is not a typical TSimpleServer as it does not block after accepting a socket.
+ * It is more like a TThreadedServer that can handle different connections in different goroutines.
+ * This will work if the Go client implements a connection pool or something similar on its side.
+ */
+type TSimpleServer struct {
+ closed int32
+ wg sync.WaitGroup
+ mu sync.Mutex
+
+ processorFactory TProcessorFactory
+ serverTransport TServerTransport
+ inputTransportFactory TTransportFactory
+ outputTransportFactory TTransportFactory
+ inputProtocolFactory TProtocolFactory
+ outputProtocolFactory TProtocolFactory
+
+ // Headers to auto forward in THeaderProtocol
+ forwardHeaders []string
+
+ logger Logger
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+ return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+ serverTransport,
+ transportFactory,
+ protocolFactory,
+ )
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+ serverTransport,
+ inputTransportFactory,
+ outputTransportFactory,
+ inputProtocolFactory,
+ outputProtocolFactory,
+ )
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+ return NewTSimpleServerFactory6(processorFactory,
+ serverTransport,
+ NewTTransportFactory(),
+ NewTTransportFactory(),
+ NewTBinaryProtocolFactoryDefault(),
+ NewTBinaryProtocolFactoryDefault(),
+ )
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+ return NewTSimpleServerFactory6(processorFactory,
+ serverTransport,
+ transportFactory,
+ transportFactory,
+ protocolFactory,
+ protocolFactory,
+ )
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+ return &TSimpleServer{
+ processorFactory: processorFactory,
+ serverTransport: serverTransport,
+ inputTransportFactory: inputTransportFactory,
+ outputTransportFactory: outputTransportFactory,
+ inputProtocolFactory: inputProtocolFactory,
+ outputProtocolFactory: outputProtocolFactory,
+ }
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+ return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+ return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+ return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+ return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+ return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+ return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+ return p.serverTransport.Listen()
+}
+
+// SetForwardHeaders sets the list of header keys that will be auto forwarded
+// while using THeaderProtocol.
+//
+// "forward" means that when the server is also a client to other upstream
+// thrift servers, the context object the user gets in the processor functions will
+// have both read and write headers set, with write headers being forwarded.
+// Users can always override the write headers by calling SetWriteHeaderList
+// before calling thrift client functions.
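+//
+// Example (illustrative header keys):
+//
+//	server.SetForwardHeaders([]string{"request-id", "tenant-id"})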
+func (p *TSimpleServer) SetForwardHeaders(headers []string) {
+ size := len(headers)
+ if size == 0 {
+ p.forwardHeaders = nil
+ return
+ }
+
+ keys := make([]string, size)
+ copy(keys, headers)
+ p.forwardHeaders = keys
+}
+
+// SetLogger sets the logger used by this TSimpleServer.
+//
+// If no logger was set before Serve is called, a default logger using standard
+// log library will be used.
+func (p *TSimpleServer) SetLogger(logger Logger) {
+ p.logger = logger
+}
+
+func (p *TSimpleServer) innerAccept() (int32, error) {
+ client, err := p.serverTransport.Accept()
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ closed := atomic.LoadInt32(&p.closed)
+ if closed != 0 {
+ return closed, nil
+ }
+ if err != nil {
+ return 0, err
+ }
+ if client != nil {
+ p.wg.Add(1)
+ go func() {
+ defer p.wg.Done()
+ if err := p.processRequests(client); err != nil {
+ p.logger(fmt.Sprintf("error processing request: %v", err))
+ }
+ }()
+ }
+ return 0, nil
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+ for {
+ closed, err := p.innerAccept()
+ if err != nil {
+ return err
+ }
+ if closed != 0 {
+ return nil
+ }
+ }
+}
+
+func (p *TSimpleServer) Serve() error {
+ p.logger = fallbackLogger(p.logger)
+
+ err := p.Listen()
+ if err != nil {
+ return err
+ }
+ p.AcceptLoop()
+ return nil
+}
+
+func (p *TSimpleServer) Stop() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if atomic.LoadInt32(&p.closed) != 0 {
+ return nil
+ }
+ atomic.StoreInt32(&p.closed, 1)
+ p.serverTransport.Interrupt()
+ p.wg.Wait()
+ return nil
+}
+
+// If err is actually EOF, return nil, otherwise return err as-is.
+func treatEOFErrorsAsNil(err error) error {
+ if err == nil {
+ return nil
+ }
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ var te TTransportException
+ if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
+ return nil
+ }
+ return err
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) (err error) {
+ defer func() {
+ err = treatEOFErrorsAsNil(err)
+ }()
+
+ processor := p.processorFactory.GetProcessor(client)
+ inputTransport, err := p.inputTransportFactory.GetTransport(client)
+ if err != nil {
+ return err
+ }
+ inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+ var outputTransport TTransport
+ var outputProtocol TProtocol
+
+ // For THeaderProtocol, we must use the same protocol instance for
+ // input and output so that the response is written in the same dialect
+ // that the server detected the request to be in.
+ headerProtocol, ok := inputProtocol.(*THeaderProtocol)
+ if ok {
+ outputProtocol = inputProtocol
+ } else {
+ oTrans, err := p.outputTransportFactory.GetTransport(client)
+ if err != nil {
+ return err
+ }
+ outputTransport = oTrans
+ outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
+ }
+
+ if inputTransport != nil {
+ defer inputTransport.Close()
+ }
+ if outputTransport != nil {
+ defer outputTransport.Close()
+ }
+ for {
+ if atomic.LoadInt32(&p.closed) != 0 {
+ return nil
+ }
+
+ ctx := SetResponseHelper(
+ defaultCtx,
+ TResponseHelper{
+ THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
+ },
+ )
+ if headerProtocol != nil {
+ // We need to call ReadFrame here, otherwise we won't
+ // get any headers on the AddReadTHeaderToContext call.
+ //
+ // ReadFrame is safe to be called multiple times so it
+ // won't break when it's called again later when we
+ // actually start to read the message.
+ if err := headerProtocol.ReadFrame(ctx); err != nil {
+ return err
+ }
+ ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
+ ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
+ }
+
+ ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
+ if errors.Is(err, ErrAbandonRequest) {
+ return client.Close()
+ }
+ if errors.As(err, new(TTransportException)) && err != nil {
+ return err
+ }
+ var tae TApplicationException
+ if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
+ continue
+ }
+ if !ok {
+ break
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
new file mode 100644
index 0000000..ba2738a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "context"
+ "errors"
+ "io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+ Flush() (err error)
+}
+
+type ContextFlusher interface {
+ Flush(ctx context.Context) (err error)
+}
+
+type ReadSizeProvider interface {
+ RemainingBytes() (num_bytes uint64)
+}
+
+// Encapsulates the I/O layer
+type TTransport interface {
+ io.ReadWriteCloser
+ ContextFlusher
+ ReadSizeProvider
+
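+// OutputElemListBegin writes the header of a Thrift-encoded JSON list: the
+// element type followed by the element count. For example,
+// OutputElemListBegin(STRING, 2) emits "[11,2"; the caller then writes the
+// two elements and closes the list with OutputListEnd.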
+ // Opens the transport for communication
+ Open() error
+
+ // Returns true if the transport is open
+ IsOpen() bool
+}
+
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// This is "enchanced" transport with extra capabilities. You need to use one of these
+// to construct protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use
+// TSocket directly in protocol.
+type TRichTransport interface {
+ io.ReadWriter
+ io.ByteReader
+ io.ByteWriter
+ stringWriter
+ ContextFlusher
+ ReadSizeProvider
+}
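+
+// Protocol constructors typically assert or wrap to obtain a TRichTransport;
+// a sketch (illustrative, details of the buffered wrapper omitted):
+//
+//	rich, ok := trans.(TRichTransport)
+//	if !ok {
+//		// wrap trans in a buffered transport before constructing the protocol
+//	}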
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
new file mode 100644
index 0000000..0a3f076
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "errors"
+ "io"
+)
+
+type timeoutable interface {
+ Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+ TException
+ TypeId() int
+ Err() error
+}
+
+const (
+ UNKNOWN_TRANSPORT_EXCEPTION = 0
+ NOT_OPEN = 1
+ ALREADY_OPEN = 2
+ TIMED_OUT = 3
+ END_OF_FILE = 4
+)
+
+type tTransportException struct {
+ typeId int
+ err error
+ msg string
+}
+
+var _ TTransportException = (*tTransportException)(nil)
+
+func (tTransportException) TExceptionType() TExceptionType {
+ return TExceptionTypeTransport
+}
+
+func (p *tTransportException) TypeId() int {
+ return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+ return p.msg
+}
+
+func (p *tTransportException) Err() error {
+ return p.err
+}
+
+func (p *tTransportException) Unwrap() error {
+ return p.err
+}
+
+func (p *tTransportException) Timeout() bool {
+ return p.typeId == TIMED_OUT
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+ return &tTransportException{
+ typeId: t,
+ err: errors.New(e),
+ msg: e,
+ }
+}
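+
+// For example, a transport's Open implementation might report a double open
+// as (illustrative):
+//
+//	return NewTTransportException(ALREADY_OPEN, "socket already open")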
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+ if e == nil {
+ return nil
+ }
+
+ if t, ok := e.(TTransportException); ok {
+ return t
+ }
+
+ te := &tTransportException{
+ typeId: UNKNOWN_TRANSPORT_EXCEPTION,
+ err: e,
+ msg: e.Error(),
+ }
+
+ if isTimeoutError(e) {
+ te.typeId = TIMED_OUT
+ return te
+ }
+
+ if errors.Is(e, io.EOF) {
+ te.typeId = END_OF_FILE
+ return te
+ }
+
+ return te
+}
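+
+// A common pattern is to normalize I/O errors at the transport boundary so
+// callers can inspect TypeId (a sketch; conn and buf are hypothetical):
+//
+//	if _, err := conn.Read(buf); err != nil {
+//		return NewTTransportExceptionFromError(err) // END_OF_FILE on io.EOF
+//	}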
+
+func prependTTransportException(prepend string, e TTransportException) TTransportException {
+ return &tTransportException{
+ typeId: e.TypeId(),
+ err: e,
+ msg: prepend + e.Error(),
+ }
+}
+
+// isTimeoutError returns true when err is an error caused by timeout.
+//
+// Note that this also includes TTransportException wrapped timeout errors.
+func isTimeoutError(err error) bool {
+ var t timeoutable
+ if errors.As(err, &t) {
+ return t.Timeout()
+ }
+ return false
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
new file mode 100644
index 0000000..c805807
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory class used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to mutate them (e.g. create
+// a BufferedTransport from the underlying base transport).
+type TTransportFactory interface {
+ GetTransport(trans TTransport) (TTransport, error)
+}
+
+type tTransportFactory struct{}
+
+// Return a wrapped instance of the base Transport.
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+ return trans, nil
+}
+
+func NewTTransportFactory() TTransportFactory {
+ return &tTransportFactory{}
+}
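+
+// The default factory returns the transport unchanged. Servers pass factories
+// to constructors such as NewTSimpleServer4 so each accepted connection can be
+// wrapped consistently (illustrative):
+//
+//	server := NewTSimpleServer4(processor, serverTransport,
+//		NewTTransportFactory(), protocolFactory)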
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
new file mode 100644
index 0000000..4292ffc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+ STOP = 0
+ VOID = 1
+ BOOL = 2
+ BYTE = 3
+ I08 = 3
+ DOUBLE = 4
+ I16 = 6
+ I32 = 8
+ I64 = 10
+ STRING = 11
+ UTF7 = 11
+ STRUCT = 12
+ MAP = 13
+ SET = 14
+ LIST = 15
+ UTF8 = 16
+ UTF16 = 17
+ // BINARY = 18 wrong and unused
+)
+
+var typeNames = map[int]string{
+ STOP: "STOP",
+ VOID: "VOID",
+ BOOL: "BOOL",
+ BYTE: "BYTE",
+ DOUBLE: "DOUBLE",
+ I16: "I16",
+ I32: "I32",
+ I64: "I64",
+ STRING: "STRING",
+ STRUCT: "STRUCT",
+ MAP: "MAP",
+ SET: "SET",
+ LIST: "LIST",
+ UTF8: "UTF8",
+ UTF16: "UTF16",
+}
+
+func (p TType) String() string {
+ if s, ok := typeNames[int(p)]; ok {
+ return s
+ }
+ return "Unknown"
+}
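+
+// For example, TType(STRUCT).String() returns "STRUCT", while an unknown
+// value such as TType(42).String() returns "Unknown".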
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
new file mode 100644
index 0000000..9a627be
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -0,0 +1,493 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "reflect"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Tracer implements opentracing.Tracer.
+type Tracer struct {
+ serviceName string
+ hostIPv4 uint32 // this is for zipkin endpoint conversion
+
+ sampler SamplerV2
+ reporter Reporter
+ metrics Metrics
+ logger log.DebugLogger
+
+ timeNow func() time.Time
+ randomNumber func() uint64
+
+ options struct {
+ gen128Bit bool // whether to generate 128bit trace IDs
+ zipkinSharedRPCSpan bool
+ highTraceIDGenerator func() uint64 // custom high trace ID generator
+ maxTagValueLength int
+ noDebugFlagOnForcedSampling bool
+ maxLogsPerSpan int
+ // more options to come
+ }
+ // allocator of Span objects
+ spanAllocator SpanAllocator
+
+ injectors map[interface{}]Injector
+ extractors map[interface{}]Extractor
+
+ observer compositeObserver
+
+ tags []Tag
+ process Process
+
+ baggageRestrictionManager baggage.RestrictionManager
+ baggageSetter *baggageSetter
+
+ debugThrottler throttler.Throttler
+}
+
+// NewTracer creates Tracer implementation that reports tracing to Jaeger.
+// The returned io.Closer can be used in shutdown hooks to ensure that the internal
+// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+// TODO (breaking change) return *Tracer only, without closer.
+func NewTracer(
+ serviceName string,
+ sampler Sampler,
+ reporter Reporter,
+ options ...TracerOption,
+) (opentracing.Tracer, io.Closer) {
+ t := &Tracer{
+ serviceName: serviceName,
+ sampler: samplerV1toV2(sampler),
+ reporter: reporter,
+ injectors: make(map[interface{}]Injector),
+ extractors: make(map[interface{}]Extractor),
+ metrics: *NewNullMetrics(),
+ spanAllocator: simpleSpanAllocator{},
+ }
+
+ for _, option := range options {
+ option(t)
+ }
+
+ // register default injectors/extractors unless they are already provided via options
+ textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+ t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+
+ binaryPropagator := NewBinaryPropagator(t)
+ t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
+
+ // TODO remove after TChannel supports OpenTracing
+ interopPropagator := &jaegerTraceContextPropagator{tracer: t}
+ t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
+
+ zipkinPropagator := &zipkinPropagator{tracer: t}
+ t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
+
+ if t.baggageRestrictionManager != nil {
+ t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
+ } else {
+ t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
+ }
+ if t.debugThrottler == nil {
+ t.debugThrottler = throttler.DefaultThrottler{}
+ }
+
+ if t.randomNumber == nil {
+ seedGenerator := utils.NewRand(time.Now().UnixNano())
+ pool := sync.Pool{
+ New: func() interface{} {
+ return rand.NewSource(seedGenerator.Int63())
+ },
+ }
+
+ t.randomNumber = func() uint64 {
+ generator := pool.Get().(rand.Source)
+ number := uint64(generator.Int63())
+ pool.Put(generator)
+ return number
+ }
+ }
+ if t.timeNow == nil {
+ t.timeNow = time.Now
+ }
+ if t.logger == nil {
+ t.logger = log.NullLogger
+ }
+ // Set tracer-level tags
+ t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
+ if hostname, err := os.Hostname(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
+ }
+ if ipval, ok := t.getTag(TracerIPTagKey); ok {
+ ipv4, err := utils.ParseIPToUint32(ipval.(string))
+ if err != nil {
+ t.hostIPv4 = 0
+ t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
+ } else {
+ t.hostIPv4 = ipv4
+ }
+ } else if ip, err := utils.HostIP(); err == nil {
+ t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
+ t.hostIPv4 = utils.PackIPAsUint32(ip)
+ } else {
+ t.logger.Error("Unable to determine this host's IP address: " + err.Error())
+ }
+
+ if t.options.gen128Bit {
+ if t.options.highTraceIDGenerator == nil {
+ t.options.highTraceIDGenerator = t.randomNumber
+ }
+ } else if t.options.highTraceIDGenerator != nil {
+ t.logger.Error("Overriding high trace ID generator but not generating " +
+ "128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
+ }
+ if t.options.maxTagValueLength == 0 {
+ t.options.maxTagValueLength = DefaultMaxTagValueLength
+ }
+ t.process = Process{
+ Service: serviceName,
+ UUID: strconv.FormatUint(t.randomNumber(), 16),
+ Tags: t.tags,
+ }
+ if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
+ throttler.SetProcess(t.process)
+ }
+
+ return t, t
+}
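+
+// Typical construction and shutdown (a minimal sketch; NewConstSampler and
+// NewNullReporter are the in-package sampler/reporter constructors assumed
+// here):
+//
+//	tracer, closer := NewTracer("my-service", NewConstSampler(true), NewNullReporter())
+//	defer closer.Close()
+//	opentracing.SetGlobalTracer(tracer)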
+
+// addCodec registers an injector and an extractor for the given propagation format if not already defined.
+func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
+ if _, ok := t.injectors[format]; !ok {
+ t.injectors[format] = injector
+ }
+ if _, ok := t.extractors[format]; !ok {
+ t.extractors[format] = extractor
+ }
+}
+
+// StartSpan implements StartSpan() method of opentracing.Tracer.
+func (t *Tracer) StartSpan(
+ operationName string,
+ options ...opentracing.StartSpanOption,
+) opentracing.Span {
+ sso := opentracing.StartSpanOptions{}
+ for _, o := range options {
+ o.Apply(&sso)
+ }
+ return t.startSpanWithOptions(operationName, sso)
+}
+
+func (t *Tracer) startSpanWithOptions(
+ operationName string,
+ options opentracing.StartSpanOptions,
+) opentracing.Span {
+ if options.StartTime.IsZero() {
+ options.StartTime = t.timeNow()
+ }
+
+ // Predicate whether the given span context is an empty reference
+ // or may be used as parent / debug ID / baggage items source
+ isEmptyReference := func(ctx SpanContext) bool {
+ return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
+ }
+
+ var references []Reference
+ var parent SpanContext
+ var hasParent bool // need this because `parent` is a value, not reference
+ var ctx SpanContext
+ var isSelfRef bool
+ for _, ref := range options.References {
+ ctxRef, ok := ref.ReferencedContext.(SpanContext)
+ if !ok {
+ t.logger.Error(fmt.Sprintf(
+ "Reference contains invalid type of SpanReference: %s",
+ reflect.ValueOf(ref.ReferencedContext)))
+ continue
+ }
+ if isEmptyReference(ctxRef) {
+ continue
+ }
+
+ if ref.Type == selfRefType {
+ isSelfRef = true
+ ctx = ctxRef
+ continue
+ }
+
+ if ctxRef.IsValid() {
+ // we don't want empty context that contains only debug-id or baggage
+ references = append(references, Reference{Type: ref.Type, Context: ctxRef})
+ }
+
+ if !hasParent {
+ parent = ctxRef
+ hasParent = ref.Type == opentracing.ChildOfRef
+ }
+ }
+ if !hasParent && !isEmptyReference(parent) {
+ // If ChildOfRef wasn't found but a FollowsFromRef exists, use the context
+ // from the FollowsFromRef as the parent
+ hasParent = true
+ }
+
+ rpcServer := false
+ if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
+ rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
+ }
+
+ var internalTags []Tag
+ newTrace := false
+ if !isSelfRef {
+ if !hasParent || !parent.IsValid() {
+ newTrace = true
+ ctx.traceID.Low = t.randomID()
+ if t.options.gen128Bit {
+ ctx.traceID.High = t.options.highTraceIDGenerator()
+ }
+ ctx.spanID = SpanID(ctx.traceID.Low)
+ ctx.parentID = 0
+ ctx.samplingState = &samplingState{
+ localRootSpan: ctx.spanID,
+ }
+ if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
+ ctx.samplingState.setDebugAndSampled()
+ internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
+ }
+ } else {
+ ctx.traceID = parent.traceID
+ if rpcServer && t.options.zipkinSharedRPCSpan {
+ // Support Zipkin's one-span-per-RPC model
+ ctx.spanID = parent.spanID
+ ctx.parentID = parent.parentID
+ } else {
+ ctx.spanID = SpanID(t.randomID())
+ ctx.parentID = parent.spanID
+ }
+ ctx.samplingState = parent.samplingState
+ if parent.remote {
+ ctx.samplingState.setFinal()
+ ctx.samplingState.localRootSpan = ctx.spanID
+ }
+ }
+ if hasParent {
+ // copy baggage items
+ if l := len(parent.baggage); l > 0 {
+ ctx.baggage = make(map[string]string, len(parent.baggage))
+ for k, v := range parent.baggage {
+ ctx.baggage[k] = v
+ }
+ }
+ }
+ }
+
+ sp := t.newSpan()
+ sp.context = ctx
+ sp.tracer = t
+ sp.operationName = operationName
+ sp.startTime = options.StartTime
+ sp.duration = 0
+ sp.references = references
+ sp.firstInProcess = rpcServer || sp.context.parentID == 0
+
+ if !sp.context.isSamplingFinalized() {
+ decision := t.sampler.OnCreateSpan(sp)
+ sp.applySamplingDecision(decision, false)
+ }
+ sp.observer = t.observer.OnStartSpan(sp, operationName, options)
+
+ if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
+ if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
+ sp.tags = make([]Tag, 0, tagsTotalLength)
+ }
+ sp.tags = append(sp.tags, internalTags...)
+ for k, v := range options.Tags {
+ sp.setTagInternal(k, v, false)
+ }
+ }
+ t.emitNewSpanMetrics(sp, newTrace)
+ return sp
+}
+
+// Inject implements Inject() method of opentracing.Tracer
+func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+ c, ok := ctx.(SpanContext)
+ if !ok {
+ return opentracing.ErrInvalidSpanContext
+ }
+ if injector, ok := t.injectors[format]; ok {
+ return injector.Inject(c, carrier)
+ }
+ return opentracing.ErrUnsupportedFormat
+}
+
+// Extract implements Extract() method of opentracing.Tracer
+func (t *Tracer) Extract(
+ format interface{},
+ carrier interface{},
+) (opentracing.SpanContext, error) {
+ if extractor, ok := t.extractors[format]; ok {
+ spanCtx, err := extractor.Extract(carrier)
+ if err != nil {
+ return nil, err // ensure returned spanCtx is nil
+ }
+ spanCtx.remote = true
+ return spanCtx, nil
+ }
+ return nil, opentracing.ErrUnsupportedFormat
+}
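+
+// A typical server-side extraction from incoming HTTP headers (illustrative;
+// req is an *http.Request):
+//
+//	spanCtx, err := tracer.Extract(
+//		opentracing.HTTPHeaders,
+//		opentracing.HTTPHeadersCarrier(req.Header),
+//	)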
+
+// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
+func (t *Tracer) Close() error {
+ t.logger.Debugf("closing tracer")
+ t.reporter.Close()
+ t.sampler.Close()
+ if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
+ _ = mgr.Close()
+ }
+ if throttler, ok := t.debugThrottler.(io.Closer); ok {
+ _ = throttler.Close()
+ }
+ return nil
+}
+
+// Tags returns a slice of tracer-level tags.
+func (t *Tracer) Tags() []opentracing.Tag {
+ tags := make([]opentracing.Tag, len(t.tags))
+ for i, tag := range t.tags {
+ tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
+ }
+ return tags
+}
+
+// getTag returns the value of a specific tag; if it does not exist, it returns nil.
+// TODO only used by tests, move there.
+func (t *Tracer) getTag(key string) (interface{}, bool) {
+ for _, tag := range t.tags {
+ if tag.key == key {
+ return tag.value, true
+ }
+ }
+ return nil, false
+}
+
+// newSpan returns an instance of a clean Span object.
+// If options.PoolSpans is true, the spans are retrieved from an object pool.
+func (t *Tracer) newSpan() *Span {
+ return t.spanAllocator.Get()
+}
+
+// emitNewSpanMetrics generates metrics on the number of started spans and traces.
+// newTrace param: we cannot simply check for parentID==0 because in Zipkin model the
+// server-side RPC span has the exact same trace/span/parent IDs as the
+// calling client-side span, but obviously the server side span is
+// no longer a root span of the trace.
+func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
+ if !sp.context.isSamplingFinalized() {
+ t.metrics.SpansStartedDelayedSampling.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedDelayedSampling.Inc(1)
+ }
+ // joining a trace is not possible, because sampling decision inherited from upstream is final
+ } else if sp.context.IsSampled() {
+ t.metrics.SpansStartedSampled.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedSampled.Inc(1)
+ }
+ } else {
+ t.metrics.SpansStartedNotSampled.Inc(1)
+ if newTrace {
+ t.metrics.TracesStartedNotSampled.Inc(1)
+ } else if sp.firstInProcess {
+ t.metrics.TracesJoinedNotSampled.Inc(1)
+ }
+ }
+}
+
+func (t *Tracer) reportSpan(sp *Span) {
+ ctx := sp.SpanContext()
+
+ if !ctx.isSamplingFinalized() {
+ t.metrics.SpansFinishedDelayedSampling.Inc(1)
+ } else if ctx.IsSampled() {
+ t.metrics.SpansFinishedSampled.Inc(1)
+ } else {
+ t.metrics.SpansFinishedNotSampled.Inc(1)
+ }
+
+ // Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
+ // and then Release() it when no longer needed.
+ // Otherwise, the span may be reused for another trace and its data may be overwritten.
+ if ctx.IsSampled() {
+ t.reporter.Report(sp)
+ }
+
+ sp.Release()
+}
+
+// randomID generates a random trace/span ID, using tracer.random() generator.
+// It never returns 0.
+func (t *Tracer) randomID() uint64 {
+ val := t.randomNumber()
+ for val == 0 {
+ val = t.randomNumber()
+ }
+ return val
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) setBaggage(sp *Span, key, value string) {
+ t.baggageSetter.setBaggage(sp, key, value)
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) isDebugAllowed(operation string) bool {
+ return t.debugThrottler.IsAllowed(operation)
+}
+
+// Sampler returns the sampler given to the tracer at creation.
+func (t *Tracer) Sampler() SamplerV2 {
+ return t.sampler
+}
+
+// SelfRef creates an opentracing compliant SpanReference from a jaeger
+// SpanContext. This is a factory function in order to encapsulate jaeger specific
+// types.
+func SelfRef(ctx SpanContext) opentracing.SpanReference {
+ return opentracing.SpanReference{
+ Type: selfRefType,
+ ReferencedContext: ctx,
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
new file mode 100644
index 0000000..f0734b7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -0,0 +1,182 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "time"
+
+ "github.com/opentracing/opentracing-go"
+
+ "github.com/uber/jaeger-client-go/internal/baggage"
+ "github.com/uber/jaeger-client-go/internal/throttler"
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// TracerOption is a function that sets some option on the tracer
+type TracerOption func(tracer *Tracer)
+
+// TracerOptions is a factory for all available TracerOption values.
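+//
+// Options are passed to NewTracer, for example (illustrative; sampler,
+// reporter, and logger are assumed to be constructed elsewhere):
+//
+//	tracer, closer := NewTracer(
+//		"my-service", sampler, reporter,
+//		TracerOptions.Logger(logger),
+//		TracerOptions.Gen128Bit(true),
+//	)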
+var TracerOptions tracerOptions
+
+type tracerOptions struct{}
+
+// Metrics creates a TracerOption that initializes Metrics on the tracer,
+// which is used to emit statistics.
+func (tracerOptions) Metrics(m *Metrics) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.metrics = *m
+ }
+}
+
+// Logger creates a TracerOption that gives the tracer a Logger.
+func (tracerOptions) Logger(logger Logger) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.logger = log.DebugLogAdapter(logger)
+ }
+}
+
+func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+ return func(tracer *Tracer) {
+ if headerKeys == nil {
+ return
+ }
+ textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+ httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+ tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+ }
+}
+
+// TimeNow creates a TracerOption that gives the tracer a function
+// used to generate timestamps for spans.
+func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.timeNow = timeNow
+ }
+}
+
+// RandomNumber creates a TracerOption that gives the tracer
+// a thread-safe random number generator function for generating trace IDs.
+func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.randomNumber = randomNumber
+ }
+}
+
+// PoolSpans creates a TracerOption that tells the tracer whether it should use
+// an object pool to minimize span allocations.
+// This should be used with care, only if the service is not running any async tasks
+// that can access parent spans after those spans have been finished.
+func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+ return func(tracer *Tracer) {
+ if poolSpans {
+ tracer.spanAllocator = newSyncPollSpanAllocator()
+ } else {
+ tracer.spanAllocator = simpleSpanAllocator{}
+ }
+ }
+}
+
+// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// If not set, the factory method will obtain the current IP address.
+// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
+func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.hostIPv4 = hostIPv4
+ }
+}
+
+func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.injectors[format] = injector
+ }
+}
+
+func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.extractors[format] = extractor
+ }
+}
+
+func (t tracerOptions) Observer(observer Observer) TracerOption {
+ return t.ContribObserver(&oldObserver{obs: observer})
+}
+
+func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.observer.append(observer)
+ }
+}
+
+func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.gen128Bit = gen128Bit
+ }
+}
+
+func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+ }
+}
+
+func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.highTraceIDGenerator = highTraceIDGenerator
+ }
+}
+
+func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.maxTagValueLength = maxTagValueLength
+ }
+}
+
+// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+// value). If a span has more logs than this value, logs are dropped as
+// necessary (and replaced with a log describing how many were dropped).
+//
+// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+// half are the newest logs.
+func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.maxLogsPerSpan = maxLogsPerSpan
+ }
+}
+
+func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+ }
+}
+
+// Tag creates a TracerOption that adds a tracer-level tag.
+func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.tags = append(tracer.tags, Tag{key: key, value: value})
+ }
+}
+
+func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.baggageRestrictionManager = mgr
+ }
+}
+
+func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+ return func(tracer *Tracer) {
+ tracer.debugThrottler = throttler
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
new file mode 100644
index 0000000..c5f5b19
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "io"
+)
+
+// Transport abstracts the method of sending spans out of process.
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same goroutine.
+type Transport interface {
+ // Append converts the span to the wire representation and adds it
+ // to sender's internal buffer. If the buffer exceeds its designated
+ // size, the transport should call Flush() and return the number of spans
+ // flushed, otherwise return 0. If an error is returned, the returned number
+ // of spans is treated as failed spans and reported to metrics accordingly.
+ Append(span *Span) (int, error)
+
+ // Flush submits the internal buffer to the remote server. It returns the
+ // number of spans flushed. If an error is returned, the returned number of
+ // spans is treated as failed spans and reported to metrics accordingly.
+ Flush() (int, error)
+
+ io.Closer
+}
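+
+// Illustrative usage (editorial sketch, not part of the vendored source): a
+// reporter would drive a Transport roughly like this, where sender is any
+// Transport implementation and span is a finished *Span:
+//
+//	if flushed, err := sender.Append(span); err != nil {
+//	    log.Printf("failed to send %d spans: %v", flushed, err)
+//	}
+//	// on shutdown, flush any buffered spans and close the transport:
+//	_, _ = sender.Flush()
+//	_ = sender.Close()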
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
new file mode 100644
index 0000000..6b961fb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport defines various transports that can be used with
+// RemoteReporter to send spans out of process. Transport is responsible
+// for serializing the spans into a specific format suitable for sending
+// to the tracing backend. Examples may include Thrift over UDP, Thrift
+// or JSON over HTTP, Thrift over Kafka, etc.
+//
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same goroutine.
+package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
new file mode 100644
index 0000000..1d6f14d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go"
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+// Default timeout for an HTTP request.
+const defaultHTTPTimeout = time.Second * 5
+
+// HTTPTransport implements Transport by forwarding spans to an HTTP server.
+type HTTPTransport struct {
+ url string
+ client *http.Client
+ batchSize int
+ spans []*j.Span
+ process *j.Process
+ httpCredentials *HTTPBasicAuthCredentials
+ headers map[string]string
+}
+
+// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
+type HTTPBasicAuthCredentials struct {
+ username string
+ password string
+}
+
+// HTTPOption sets a parameter for the HTTPTransport.
+type HTTPOption func(c *HTTPTransport)
+
+// HTTPTimeout sets the maximum timeout for HTTP requests.
+func HTTPTimeout(duration time.Duration) HTTPOption {
+ return func(c *HTTPTransport) { c.client.Timeout = duration }
+}
+
+// HTTPBatchSize sets the maximum batch size, after which a flush will be
+// triggered. The default batch size is 100 spans.
+func HTTPBatchSize(n int) HTTPOption {
+ return func(c *HTTPTransport) { c.batchSize = n }
+}
+
+// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
+func HTTPBasicAuth(username string, password string) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
+ }
+}
+
+// HTTPRoundTripper configures the underlying Transport on the *http.Client
+// that is used to send requests to the collector.
+func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.client.Transport = transport
+ }
+}
+
+// HTTPHeaders defines the HTTP headers that will be attached to the Jaeger client's HTTP requests.
+func HTTPHeaders(headers map[string]string) HTTPOption {
+ return func(c *HTTPTransport) {
+ c.headers = headers
+ }
+}
+
+// NewHTTPTransport returns a new HTTP-backed transport. url should be an HTTP
+// URL of the collector that handles POST requests, typically something like:
+// http://hostname:14268/api/traces?format=jaeger.thrift
+func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
+ c := &HTTPTransport{
+ url: url,
+ client: &http.Client{Timeout: defaultHTTPTimeout},
+ batchSize: 100,
+ spans: []*j.Span{},
+ }
+
+ for _, option := range options {
+ option(c)
+ }
+ return c
+}
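+
+// Illustrative usage (editorial sketch, not part of the vendored source),
+// assuming a collector is reachable on localhost:14268:
+//
+//	sender := NewHTTPTransport(
+//	    "http://localhost:14268/api/traces?format=jaeger.thrift",
+//	    HTTPBatchSize(10),
+//	    HTTPBasicAuth("user", "secret"),
+//	)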
+
+// Append implements Transport.
+func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
+ if c.process == nil {
+ c.process = jaeger.BuildJaegerProcessThrift(span)
+ }
+ jSpan := jaeger.BuildJaegerThrift(span)
+ c.spans = append(c.spans, jSpan)
+ if len(c.spans) >= c.batchSize {
+ return c.Flush()
+ }
+ return 0, nil
+}
+
+// Flush implements Transport.
+func (c *HTTPTransport) Flush() (int, error) {
+ count := len(c.spans)
+ if count == 0 {
+ return 0, nil
+ }
+ err := c.send(c.spans)
+ c.spans = c.spans[:0]
+ return count, err
+}
+
+// Close implements Transport.
+func (c *HTTPTransport) Close() error {
+ return nil
+}
+
+func (c *HTTPTransport) send(spans []*j.Span) error {
+ batch := &j.Batch{
+ Spans: spans,
+ Process: c.process,
+ }
+ body, err := serializeThrift(batch)
+ if err != nil {
+ return err
+ }
+ req, err := http.NewRequest("POST", c.url, body)
+ if err != nil {
+ return err
+ }
+ req.Header.Set("Content-Type", "application/x-thrift")
+ for k, v := range c.headers {
+ req.Header.Set(k, v)
+ }
+
+ if c.httpCredentials != nil {
+ req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return err
+ }
+ io.Copy(ioutil.Discard, resp.Body)
+ resp.Body.Close()
+ if resp.StatusCode >= http.StatusBadRequest {
+ return fmt.Errorf("error from collector: %d", resp.StatusCode)
+ }
+ return nil
+}
+
+func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
+ t := thrift.NewTMemoryBuffer()
+ p := thrift.NewTBinaryProtocolTransport(t)
+ if err := obj.Write(context.Background(), p); err != nil {
+ return nil, err
+ }
+ return t.Buffer, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
new file mode 100644
index 0000000..0000412
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -0,0 +1,194 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/uber/jaeger-client-go/internal/reporterstats"
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/thrift"
+ j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+// Empirically obtained constant for how many bytes in the message are used for the envelope.
+// The total datagram size is:
+// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
+//
+// Note that due to the use of the Compact Thrift protocol, the overhead grows with the number
+// of spans in the batch, because the length of the list is encoded as a varint32, as is the SeqId.
+//
+// There is a unit test `TestEmitBatchOverhead` that validates this number; it fails at <68.
+const emitBatchOverhead = 70
+
+var errSpanTooLarge = errors.New("span is too large")
+
+type udpSender struct {
+ client *utils.AgentClientUDP
+ maxPacketSize int // max size of datagram in bytes
+ maxSpanBytes int // max number of bytes to record spans (excluding envelope) in the datagram
+ byteBufferSize int // current number of span bytes accumulated in the buffer
+ spanBuffer []*j.Span // spans buffered before a flush
+ thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+ thriftProtocol thrift.TProtocol
+ process *j.Process
+ processByteSize int
+
+ // reporterStats provides access to stats that are only known to Reporter
+ reporterStats reporterstats.ReporterStats
+
+ // The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
+ // so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
+ batchSeqNo int64
+ tooLargeDroppedSpans int64
+ failedToEmitSpans int64
+}
+
+// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
+// be passed to NewUDPTransportWithParams.
+type UDPTransportParams struct {
+ utils.AgentClientUDPParams
+}
+
+// NewUDPTransportWithParams creates a Transport that submits spans to jaeger-agent over UDP.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
+ if len(params.HostPort) == 0 {
+ params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+ }
+
+ if params.Logger == nil {
+ params.Logger = log.StdLogger
+ }
+
+ if params.MaxPacketSize == 0 {
+ params.MaxPacketSize = utils.UDPPacketMaxLength
+ }
+
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+
+ // Each span is first written to thriftBuffer to determine its size in bytes.
+ thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
+ thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+
+ client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
+ if err != nil {
+ return nil, err
+ }
+
+ return &udpSender{
+ client: client,
+ maxSpanBytes: params.MaxPacketSize - emitBatchOverhead,
+ thriftBuffer: thriftBuffer,
+ thriftProtocol: thriftProtocol,
+ }, nil
+}
+
+// NewUDPTransport creates a Transport that submits spans to jaeger-agent over UDP.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+ return NewUDPTransportWithParams(UDPTransportParams{
+ AgentClientUDPParams: utils.AgentClientUDPParams{
+ HostPort: hostPort,
+ MaxPacketSize: maxPacketSize,
+ },
+ })
+}
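+
+// Illustrative usage (editorial sketch, not part of the vendored source),
+// assuming a jaeger-agent is listening on localhost:6831; passing 0 for
+// maxPacketSize selects utils.UDPPacketMaxLength, and NewRemoteReporter is
+// defined elsewhere in this package:
+//
+//	sender, err := NewUDPTransport("localhost:6831", 0)
+//	if err != nil {
+//	    // handle error
+//	}
+//	reporter := NewRemoteReporter(sender)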
+
+// SetReporterStats implements reporterstats.Receiver.
+func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
+ s.reporterStats = rs
+}
+
+func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
+ s.thriftBuffer.Reset()
+ _ = thriftStruct.Write(context.Background(), s.thriftProtocol)
+ return s.thriftBuffer.Len()
+}
+
+func (s *udpSender) Append(span *Span) (int, error) {
+ if s.process == nil {
+ s.process = BuildJaegerProcessThrift(span)
+ s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
+ s.byteBufferSize += s.processByteSize
+ }
+ jSpan := BuildJaegerThrift(span)
+ spanSize := s.calcSizeOfSerializedThrift(jSpan)
+ if spanSize > s.maxSpanBytes {
+ s.tooLargeDroppedSpans++
+ return 1, errSpanTooLarge
+ }
+
+ s.byteBufferSize += spanSize
+ if s.byteBufferSize <= s.maxSpanBytes {
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ if s.byteBufferSize < s.maxSpanBytes {
+ return 0, nil
+ }
+ return s.Flush()
+ }
+ // the latest span did not fit in the buffer
+ n, err := s.Flush()
+ s.spanBuffer = append(s.spanBuffer, jSpan)
+ s.byteBufferSize = spanSize + s.processByteSize
+ return n, err
+}
+
+func (s *udpSender) Flush() (int, error) {
+ n := len(s.spanBuffer)
+ if n == 0 {
+ return 0, nil
+ }
+ s.batchSeqNo++
+ batchSeqNo := int64(s.batchSeqNo)
+ err := s.client.EmitBatch(context.Background(), &j.Batch{
+ Process: s.process,
+ Spans: s.spanBuffer,
+ SeqNo: &batchSeqNo,
+ Stats: s.makeStats(),
+ })
+ s.resetBuffers()
+ if err != nil {
+ s.failedToEmitSpans += int64(n)
+ }
+ return n, err
+}
+
+func (s *udpSender) Close() error {
+ return s.client.Close()
+}
+
+func (s *udpSender) resetBuffers() {
+ for i := range s.spanBuffer {
+ s.spanBuffer[i] = nil
+ }
+ s.spanBuffer = s.spanBuffer[:0]
+ s.byteBufferSize = s.processByteSize
+}
+
+func (s *udpSender) makeStats() *j.ClientStats {
+ var dropped int64
+ if s.reporterStats != nil {
+ dropped = s.reporterStats.SpansDroppedFromQueue()
+ }
+ return &j.ClientStats{
+ FullQueueDroppedSpans: dropped,
+ TooLargeDroppedSpans: s.tooLargeDroppedSpans,
+ FailedToEmitSpans: s.failedToEmitSpans,
+ }
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
new file mode 100644
index 0000000..237211f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+)
+
+// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
+func GetJSON(url string, out interface{}) error {
+ resp, err := http.Get(url)
+ if err != nil {
+ return err
+ }
+ return ReadJSON(resp, out)
+}
+
+// ReadJSON reads JSON from http.Response and parses it into `out`
+func ReadJSON(resp *http.Response, out interface{}) error {
+ defer resp.Body.Close()
+
+ if resp.StatusCode >= 400 {
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+ }
+
+ if out == nil {
+ io.Copy(ioutil.Discard, resp.Body)
+ return nil
+ }
+
+ decoder := json.NewDecoder(resp.Body)
+ return decoder.Decode(out)
+}
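+
+// Illustrative usage (editorial sketch, not part of the vendored source),
+// with a hypothetical endpoint and response shape:
+//
+//	var out struct {
+//	    StrategyType string `json:"strategyType"`
+//	}
+//	err := GetJSON("http://localhost:5778/sampling?service=my-service", &out)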
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
new file mode 100644
index 0000000..b51af77
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "errors"
+ "net"
+)
+
+// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
+
+// scoreAddr scores how likely the given addr is to be a remote address and returns the
+// IP to use when listening. Any address which receives a negative score should not be used.
+// Scores are calculated as:
+// -1 for any unknown IP address,
+// +300 for IPv4 addresses,
+// +100 for non-local addresses, with an extra +100 for "up" interfaces.
+func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
+ var ip net.IP
+ if netAddr, ok := addr.(*net.IPNet); ok {
+ ip = netAddr.IP
+ } else if netIP, ok := addr.(*net.IPAddr); ok {
+ ip = netIP.IP
+ } else {
+ return -1, nil
+ }
+
+ var score int
+ if ip.To4() != nil {
+ score += 300
+ }
+ if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
+ score += 100
+ if iface.Flags&net.FlagUp != 0 {
+ score += 100
+ }
+ }
+ return score, ip
+}
+
+// HostIP tries to find an IP that can be used by other machines to reach this machine.
+func HostIP() (net.IP, error) {
+ interfaces, err := net.Interfaces()
+ if err != nil {
+ return nil, err
+ }
+
+ bestScore := -1
+ var bestIP net.IP
+ // Select the highest scoring IP as the best IP.
+ for _, iface := range interfaces {
+ addrs, err := iface.Addrs()
+ if err != nil {
+ // Skip this interface if there is an error.
+ continue
+ }
+
+ for _, addr := range addrs {
+ score, ip := scoreAddr(iface, addr)
+ if score > bestScore {
+ bestScore = score
+ bestIP = ip
+ }
+ }
+ }
+
+ if bestScore == -1 {
+ return nil, errors.New("no addresses to listen on")
+ }
+
+ return bestIP, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
new file mode 100644
index 0000000..9875f7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "math/rand"
+ "sync"
+)
+
+// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
+// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
+type lockedSource struct {
+ mut sync.Mutex
+ src rand.Source
+}
+
+// NewRand returns a rand.Rand that is thread-safe.
+func NewRand(seed int64) *rand.Rand {
+ return rand.New(&lockedSource{src: rand.NewSource(seed)})
+}
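+
+// Illustrative usage (editorial sketch, not part of the vendored source):
+//
+//	rng := NewRand(time.Now().UnixNano())
+//	id := uint64(rng.Int63()) // safe to call from multiple goroutines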
+
+func (r *lockedSource) Int63() (n int64) {
+ r.mut.Lock()
+ n = r.src.Int63()
+ r.mut.Unlock()
+ return
+}
+
+// Seed implements Seed() of rand.Source.
+func (r *lockedSource) Seed(seed int64) {
+ r.mut.Lock()
+ r.src.Seed(seed)
+ r.mut.Unlock()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
new file mode 100644
index 0000000..bf2f131
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "sync"
+ "time"
+)
+
+// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+//
+// TODO (breaking change) remove this interface in favor of public struct below
+//
+// Deprecated: use ReconfigurableRateLimiter.
+type RateLimiter interface {
+ CheckCredit(itemCost float64) bool
+}
+
+// ReconfigurableRateLimiter is a rate limiter based on the leaky bucket algorithm, formulated in terms of a
+// credits balance that is replenished every time the CheckCredit() method is called (a tick) by an amount
+// proportional to the time elapsed since the last tick, up to a maximum of maxBalance. A call to CheckCredit()
+// takes the cost of an item we want to pay for with the balance. If the balance is at least the cost of the
+// item, the item is "purchased" and the balance is reduced, as indicated by a returned value of true.
+// Otherwise the balance is unchanged and false is returned.
+//
+// This can be used to limit the rate of messages emitted by a service by instantiating the rate limiter with the
+// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
+// to determine if the message is within the rate limit.
+//
+// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to the desired
+// throughput as bytes/second, and calling CheckCredit() with the actual message size.
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+ lock sync.Mutex
+
+ creditsPerSecond float64
+ balance float64
+ maxBalance float64
+ lastTick time.Time
+
+ timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+ return &ReconfigurableRateLimiter{
+ creditsPerSecond: creditsPerSecond,
+ balance: maxBalance,
+ maxBalance: maxBalance,
+ lastTick: time.Now(),
+ timeNow: time.Now,
+ }
+}
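+
+// Illustrative usage (editorial sketch, not part of the vendored source):
+// allow roughly two messages per second with bursts of up to two:
+//
+//	limiter := NewRateLimiter(2.0, 2.0)
+//	if limiter.CheckCredit(1.0) {
+//	    // within the rate limit; emit the message
+//	}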
+
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
+ // if we have enough credits to pay for current item, then reduce balance and allow
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
+ return true
+ }
+ // otherwise check if balance can be increased due to time elapsed, and try again
+ rl.updateBalance()
+ if rl.balance >= itemCost {
+ rl.balance -= itemCost
+ return true
+ }
+ return false
+}
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+ // calculate how much time passed since the last tick, and update current tick
+ currentTime := rl.timeNow()
+ elapsedTime := currentTime.Sub(rl.lastTick)
+ rl.lastTick = currentTime
+ // calculate how much credit we have accumulated since the last tick
+ rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+ if rl.balance > rl.maxBalance {
+ rl.balance = rl.maxBalance
+ }
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid a thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+ rl.lock.Lock()
+ defer rl.lock.Unlock()
+
+ rl.updateBalance() // get up to date balance
+ rl.balance = rl.balance * maxBalance / rl.maxBalance
+ rl.creditsPerSecond = creditsPerSecond
+ rl.maxBalance = maxBalance
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
new file mode 100644
index 0000000..0dffc7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "fmt"
+ "net"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+)
+
+// reconnectingUDPConn is an implementation of udpConn that re-resolves hostPort every resolveTimeout; if the
+// resolved address differs from the current conn's, the new address is dialed and the conn is swapped.
+type reconnectingUDPConn struct {
+ hostPort string
+ resolveFunc resolveFunc
+ dialFunc dialFunc
+ logger log.Logger
+ bufferBytes int64
+
+ connMtx sync.RWMutex
+ conn *net.UDPConn
+ destAddr *net.UDPAddr
+ closeChan chan struct{}
+}
+
+type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
+type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
+
+// newReconnectingUDPConn returns a new udpConn that re-resolves hostPort every resolveTimeout; if the
+// resolved address differs from the current conn's, the new address is dialed and the conn is swapped.
+func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) {
+ conn := &reconnectingUDPConn{
+ hostPort: hostPort,
+ resolveFunc: resolveFunc,
+ dialFunc: dialFunc,
+ logger: logger,
+ closeChan: make(chan struct{}),
+ }
+
+ if err := conn.attemptResolveAndDial(); err != nil {
+ logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout))
+ }
+
+ go conn.reconnectLoop(resolveTimeout)
+
+ return conn, nil
+}
+
+func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
+ ticker := time.NewTicker(resolveTimeout)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.closeChan:
+ return
+ case <-ticker.C:
+ if err := c.attemptResolveAndDial(); err != nil {
+ c.logger.Error(err.Error())
+ }
+ }
+ }
+}
+
+func (c *reconnectingUDPConn) attemptResolveAndDial() error {
+ newAddr, err := c.resolveFunc("udp", c.hostPort)
+ if err != nil {
+ return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
+ }
+
+ c.connMtx.RLock()
+ curAddr := c.destAddr
+ c.connMtx.RUnlock()
+
+ // don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn's
+ if curAddr != nil && newAddr.String() == curAddr.String() {
+ return nil
+ }
+
+ if err := c.attemptDialNewAddr(newAddr); err != nil {
+ return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
+ }
+
+ return nil
+}
+
+func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
+ connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
+ if err != nil {
+ return err
+ }
+
+ if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
+ if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
+ return err
+ }
+ }
+
+ c.connMtx.Lock()
+ c.destAddr = newAddr
+ // store prev to close later
+ prevConn := c.conn
+ c.conn = connUDP
+ c.connMtx.Unlock()
+
+ if prevConn != nil {
+ return prevConn.Close()
+ }
+
+ return nil
+}
+
+// Write calls net.UDPConn.Write; if it fails, an attempt is made to connect to a new addr, and if that succeeds the write is retried before returning.
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
+ var bytesWritten int
+ var err error
+
+ c.connMtx.RLock()
+ if c.conn == nil {
+ // if the connection is not initialized, indicate this with an err in order to hook into the retry logic
+ err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
+ } else {
+ bytesWritten, err = c.conn.Write(b)
+ }
+ c.connMtx.RUnlock()
+
+ if err == nil {
+ return bytesWritten, nil
+ }
+
+ // attempt to resolve and dial a new address in case that's the problem; if resolve and dial succeed, try the write again
+ if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
+ c.connMtx.RLock()
+ defer c.connMtx.RUnlock()
+ return c.conn.Write(b)
+ }
+
+ // return original error if reconn fails
+ return bytesWritten, err
+}
+
+// Close stops the reconnectLoop, then closes the connection via net.UDPConn's implementation.
+func (c *reconnectingUDPConn) Close() error {
+ close(c.closeChan)
+
+ // acquire rw lock before closing conn to ensure calls to Write drain
+ c.connMtx.Lock()
+ defer c.connMtx.Unlock()
+
+ if c.conn != nil {
+ return c.conn.Close()
+ }
+
+ return nil
+}
+
+// SetWriteBuffer defers to the net.UDPConn SetWriteBuffer implementation, wrapped with an RLock. If no conn is
+// currently held when SetWriteBuffer is called, bufferBytes is stored and applied to newly dialed conns.
+func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
+ var err error
+
+ c.connMtx.RLock()
+ if c.conn != nil {
+ err = c.conn.SetWriteBuffer(bytes)
+ }
+ c.connMtx.RUnlock()
+
+ if err == nil {
+ atomic.StoreInt64(&c.bufferBytes, int64(bytes))
+ }
+
+ return err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
new file mode 100644
index 0000000..4c59ae9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -0,0 +1,149 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "time"
+
+ "github.com/uber/jaeger-client-go/log"
+ "github.com/uber/jaeger-client-go/thrift"
+
+ "github.com/uber/jaeger-client-go/thrift-gen/agent"
+ "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+ "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// UDPPacketMaxLength is the max size of a UDP packet we want to send, synced with jaeger-agent.
+const UDPPacketMaxLength = 65000
+
+// AgentClientUDP is a UDP client to the Jaeger agent that implements the agent.Agent interface.
+type AgentClientUDP struct {
+ agent.Agent
+ io.Closer
+
+ connUDP udpConn
+ client *agent.AgentClient
+ maxPacketSize int // max size of datagram in bytes
+ thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+}
+
+type udpConn interface {
+ Write([]byte) (int, error)
+ SetWriteBuffer(int) error
+ Close() error
+}
+
+// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should
+// be passed to NewAgentClientUDPWithParams.
+type AgentClientUDPParams struct {
+ HostPort string
+ MaxPacketSize int
+ Logger log.Logger
+ DisableAttemptReconnecting bool
+ AttemptReconnectInterval time.Duration
+}
+
+// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) {
+ // validate hostport
+ if _, _, err := net.SplitHostPort(params.HostPort); err != nil {
+ return nil, err
+ }
+
+ if params.MaxPacketSize == 0 {
+ params.MaxPacketSize = UDPPacketMaxLength
+ }
+
+ if params.Logger == nil {
+ params.Logger = log.StdLogger
+ }
+
+ if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 {
+ params.AttemptReconnectInterval = time.Second * 30
+ }
+
+ thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
+ protocolFactory := thrift.NewTCompactProtocolFactory()
+ client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+ var connUDP udpConn
+ var err error
+
+ if params.DisableAttemptReconnecting {
+ destAddr, err := net.ResolveUDPAddr("udp", params.HostPort)
+ if err != nil {
+ return nil, err
+ }
+
+ connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // host is a hostname; set up a resolver loop in case the host record changes during operation
+ connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
+ return nil, err
+ }
+
+ return &AgentClientUDP{
+ connUDP: connUDP,
+ client: client,
+ maxPacketSize: params.MaxPacketSize,
+ thriftBuffer: thriftBuffer,
+ }, nil
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+ return NewAgentClientUDPWithParams(AgentClientUDPParams{
+ HostPort: hostPort,
+ MaxPacketSize: maxPacketSize,
+ })
+}
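+
+// Illustrative usage (editorial sketch, not part of the vendored source),
+// assuming a jaeger-agent is listening on localhost:6831:
+//
+//	client, err := NewAgentClientUDP("localhost:6831", 0)
+//	if err != nil {
+//	    // handle error
+//	}
+//	defer client.Close()
+//	// batch is a *jaeger.Batch assembled by the caller
+//	err = client.EmitBatch(context.Background(), batch)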
+
+// EmitZipkinBatch implements EmitZipkinBatch() of the Agent interface.
+func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error {
+ return errors.New("Not implemented")
+}
+
+// EmitBatch implements EmitBatch() of the Agent interface.
+func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error {
+ a.thriftBuffer.Reset()
+ if err := a.client.EmitBatch(ctx, batch); err != nil {
+ return err
+ }
+ if a.thriftBuffer.Len() > a.maxPacketSize {
+ return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
+ a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+ }
+ _, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+ return err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *AgentClientUDP) Close() error {
+ return a.connUDP.Close()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
new file mode 100644
index 0000000..ac3c325
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+ "encoding/binary"
+ "errors"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+)
+
+var (
+ // ErrEmptyIP is an error for empty IP strings
+ ErrEmptyIP = errors.New("empty string given for ip")
+
+ // ErrNotHostColonPort is an error for an invalid host:port string
+ ErrNotHostColonPort = errors.New("expecting host:port")
+
+ // ErrNotFourOctets is an error for the wrong number of octets after splitting a string
+ ErrNotFourOctets = errors.New("Wrong number of octets")
+)
+
+// ParseIPToUint32 converts a string IP (e.g. "x.y.z.w") to a uint32.
+func ParseIPToUint32(ip string) (uint32, error) {
+ if ip == "" {
+ return 0, ErrEmptyIP
+ }
+
+ if ip == "localhost" {
+ return 127<<24 | 1, nil
+ }
+
+ octets := strings.Split(ip, ".")
+ if len(octets) != 4 {
+ return 0, ErrNotFourOctets
+ }
+
+ var intIP uint32
+ for i := 0; i < 4; i++ {
+ octet, err := strconv.Atoi(octets[i])
+ if err != nil {
+ return 0, err
+ }
+ intIP = (intIP << 8) | uint32(octet)
+ }
+
+ return intIP, nil
+}
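+
+// For example (editorial note, not part of the vendored source), "10.1.2.3"
+// parses to (10<<24)|(1<<16)|(2<<8)|3 = 0x0A010203:
+//
+//	ip, err := ParseIPToUint32("10.1.2.3") // ip == 0x0A010203, err == nil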
+
+// ParsePort converts a port number from string to uint16.
+func ParsePort(portString string) (uint16, error) {
+ port, err := strconv.ParseUint(portString, 10, 16)
+ return uint16(port), err
+}
+
+// PackIPAsUint32 packs an IPv4 as uint32
+func PackIPAsUint32(ip net.IP) uint32 {
+ if ipv4 := ip.To4(); ipv4 != nil {
+ return binary.BigEndian.Uint32(ipv4)
+ }
+ return 0
+}
+
+// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long
+// representing time since epoch in microseconds, which is what is expected
+// in Jaeger spans encoded as Thrift.
+func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
+ // ^^^ Passing time.Time by value is faster than passing a pointer!
+ // BenchmarkTimeByValue-8 2000000000 1.37 ns/op
+ // BenchmarkTimeByPtr-8 2000000000 1.98 ns/op
+
+ return t.UnixNano() / 1000
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
new file mode 100644
index 0000000..98cab4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "github.com/opentracing/opentracing-go"
+)
+
+// ZipkinSpanFormat is an OpenTracing carrier format constant
+const ZipkinSpanFormat = "zipkin-span-format"
+
+// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type ExtractableZipkinSpan interface {
+ TraceID() uint64
+ SpanID() uint64
+ ParentID() uint64
+ Flags() byte
+}
+
+// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type InjectableZipkinSpan interface {
+ SetTraceID(traceID uint64)
+ SetSpanID(spanID uint64)
+ SetParentID(parentID uint64)
+ SetFlags(flags byte)
+}
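+
+// Illustrative usage (editorial sketch, not part of the vendored source):
+// assuming the tracer registers this propagator for ZipkinSpanFormat, a
+// carrier implementing InjectableZipkinSpan can be populated with:
+//
+//	err := tracer.Inject(span.Context(), ZipkinSpanFormat, carrier)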
+
+type zipkinPropagator struct {
+ tracer *Tracer
+}
+
+func (p *zipkinPropagator) Inject(
+ ctx SpanContext,
+ abstractCarrier interface{},
+) error {
+ carrier, ok := abstractCarrier.(InjectableZipkinSpan)
+ if !ok {
+ return opentracing.ErrInvalidCarrier
+ }
+
+ carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
+ carrier.SetSpanID(uint64(ctx.SpanID()))
+ carrier.SetParentID(uint64(ctx.ParentID()))
+ carrier.SetFlags(ctx.samplingState.flags())
+ return nil
+}
+
+func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+ carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
+ if !ok {
+ return emptyContext, opentracing.ErrInvalidCarrier
+ }
+ if carrier.TraceID() == 0 {
+ return emptyContext, opentracing.ErrSpanContextNotFound
+ }
+ var ctx SpanContext
+ ctx.traceID.Low = carrier.TraceID()
+ ctx.spanID = SpanID(carrier.SpanID())
+ ctx.parentID = SpanID(carrier.ParentID())
+ ctx.samplingState = &samplingState{}
+ ctx.samplingState.setFlags(carrier.Flags())
+ return ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
new file mode 100644
index 0000000..73aeb00
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -0,0 +1,329 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+ "encoding/binary"
+ "fmt"
+ "time"
+
+ "github.com/opentracing/opentracing-go/ext"
+
+ "github.com/uber/jaeger-client-go/internal/spanlog"
+ z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+ "github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+ // Zipkin UI does not work well with non-string tag values
+ allowPackedNumbers = false
+)
+
+var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
+ string(ext.SpanKind): setSpanKind,
+ string(ext.PeerHostIPv4): setPeerIPv4,
+ string(ext.PeerPort): setPeerPort,
+ string(ext.PeerService): setPeerService,
+ TracerIPTagKey: removeTag,
+}
+
+// BuildZipkinThrift builds a Thrift span based on the internal span.
+// TODO: (breaking change) move to transport/zipkin and make private.
+func BuildZipkinThrift(s *Span) *z.Span {
+ span := &zipkinSpan{Span: s}
+ span.handleSpecialTags()
+ parentID := int64(span.context.parentID)
+ var ptrParentID *int64
+ if parentID != 0 {
+ ptrParentID = &parentID
+ }
+ traceIDHigh := int64(span.context.traceID.High)
+ var ptrTraceIDHigh *int64
+ if traceIDHigh != 0 {
+ ptrTraceIDHigh = &traceIDHigh
+ }
+ timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+ duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+ endpoint := &z.Endpoint{
+ ServiceName: span.tracer.serviceName,
+ Ipv4: int32(span.tracer.hostIPv4)}
+ thriftSpan := &z.Span{
+ TraceID: int64(span.context.traceID.Low),
+ TraceIDHigh: ptrTraceIDHigh,
+ ID: int64(span.context.spanID),
+ ParentID: ptrParentID,
+ Name: span.operationName,
+ Timestamp: &timestamp,
+ Duration: &duration,
+ Debug: span.context.IsDebug(),
+ Annotations: buildAnnotations(span, endpoint),
+ BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
+ return thriftSpan
+}
+
+func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
+ // automatically adding 2 Zipkin CoreAnnotations
+ annotations := make([]*z.Annotation, 0, 2+len(span.logs))
+ var startLabel, endLabel string
+ if span.spanKind == string(ext.SpanKindRPCClientEnum) {
+ startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
+ } else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
+ startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
+ }
+ if !span.startTime.IsZero() && startLabel != "" {
+ start := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
+ Value: startLabel,
+ Host: endpoint}
+ annotations = append(annotations, start)
+ if span.duration != 0 {
+ endTs := span.startTime.Add(span.duration)
+ end := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
+ Value: endLabel,
+ Host: endpoint}
+ annotations = append(annotations, end)
+ }
+ }
+ for _, log := range span.logs {
+ anno := &z.Annotation{
+ Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+ Host: endpoint}
+ if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
+ anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
+ } else {
+ anno.Value = err.Error()
+ }
+ annotations = append(annotations, anno)
+ }
+ return annotations
+}
+
+func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
+ // automatically adding local component or server/client address tag, and client version
+ annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
+
+ if span.peerDefined() && span.isRPC() {
+ peer := z.Endpoint{
+ Ipv4: span.peer.Ipv4,
+ Port: span.peer.Port,
+ ServiceName: span.peer.ServiceName}
+ label := z.CLIENT_ADDR
+ if span.isRPCClient() {
+ label = z.SERVER_ADDR
+ }
+ anno := &z.BinaryAnnotation{
+ Key: label,
+ Value: []byte{1},
+ AnnotationType: z.AnnotationType_BOOL,
+ Host: &peer}
+ annotations = append(annotations, anno)
+ }
+ if !span.isRPC() {
+ componentName := endpoint.ServiceName
+ for _, tag := range span.tags {
+ if tag.key == string(ext.Component) {
+ componentName = stringify(tag.value)
+ break
+ }
+ }
+ local := &z.BinaryAnnotation{
+ Key: z.LOCAL_COMPONENT,
+ Value: []byte(componentName),
+ AnnotationType: z.AnnotationType_STRING,
+ Host: endpoint}
+ annotations = append(annotations, local)
+ }
+ for _, tag := range span.tags {
+ // "Special tags" are already handled by this point, we'd be double reporting the
+ // tags if we don't skip here
+ if _, ok := specialTagHandlers[tag.key]; ok {
+ continue
+ }
+ if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
+ annotations = append(annotations, anno)
+ }
+ }
+ return annotations
+}
+
+func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
+ bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
+ if value, ok := val.(string); ok {
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ } else if value, ok := val.([]byte); ok {
+ if len(value) > maxTagValueLength {
+ value = value[:maxTagValueLength]
+ }
+ bann.Value = value
+ bann.AnnotationType = z.AnnotationType_BYTES
+ } else if value, ok := val.(int32); ok && allowPackedNumbers {
+ bann.Value = int32ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I32
+ } else if value, ok := val.(int64); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(value)
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(int); ok && allowPackedNumbers {
+ bann.Value = int64ToBytes(int64(value))
+ bann.AnnotationType = z.AnnotationType_I64
+ } else if value, ok := val.(bool); ok {
+ bann.Value = []byte{boolToByte(value)}
+ bann.AnnotationType = z.AnnotationType_BOOL
+ } else {
+ value := stringify(val)
+ bann.Value = []byte(truncateString(value, maxTagValueLength))
+ bann.AnnotationType = z.AnnotationType_STRING
+ }
+ return bann
+}
+
+func stringify(value interface{}) string {
+ if s, ok := value.(string); ok {
+ return s
+ }
+ return fmt.Sprintf("%+v", value)
+}
+
+func truncateString(value string, maxLength int) string {
+ // we ignore the problem of utf8 runes possibly being sliced in the middle,
+ // as it is rather expensive to iterate through each tag just to find rune
+ // boundaries.
+ if len(value) > maxLength {
+ return value[:maxLength]
+ }
+ return value
+}
+
+func boolToByte(b bool) byte {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// int32ToBytes converts int32 to bytes.
+func int32ToBytes(i int32) []byte {
+ buf := make([]byte, 4)
+ binary.BigEndian.PutUint32(buf, uint32(i))
+ return buf
+}
+
+// int64ToBytes converts int64 to bytes.
+func int64ToBytes(i int64) []byte {
+ buf := make([]byte, 8)
+ binary.BigEndian.PutUint64(buf, uint64(i))
+ return buf
+}
+
+type zipkinSpan struct {
+ *Span
+
+ // peer points to the peer service participating in this span,
+ // e.g. the Client if this span is a server span,
+ // or Server if this span is a client span
+ peer struct {
+ Ipv4 int32
+ Port int16
+ ServiceName string
+ }
+
+ // used to distinguish local vs. RPC Server vs. RPC Client spans
+ spanKind string
+}
+
+func (s *zipkinSpan) handleSpecialTags() {
+ s.Lock()
+ defer s.Unlock()
+ if s.firstInProcess {
+ // append the process tags
+ s.tags = append(s.tags, s.tracer.tags...)
+ }
+ filteredTags := make([]Tag, 0, len(s.tags))
+ for _, tag := range s.tags {
+ if handler, ok := specialTagHandlers[tag.key]; ok {
+ handler(s, tag.value)
+ } else {
+ filteredTags = append(filteredTags, tag)
+ }
+ }
+ s.tags = filteredTags
+}
+
+func setSpanKind(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.spanKind = val
+ return
+ }
+ if val, ok := value.(ext.SpanKindEnum); ok {
+ s.spanKind = string(val)
+ }
+}
+
+func setPeerIPv4(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if ip, err := utils.ParseIPToUint32(val); err == nil {
+ s.peer.Ipv4 = int32(ip)
+ return
+ }
+ }
+ if val, ok := value.(uint32); ok {
+ s.peer.Ipv4 = int32(val)
+ return
+ }
+ if val, ok := value.(int32); ok {
+ s.peer.Ipv4 = val
+ }
+}
+
+func setPeerPort(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ if port, err := utils.ParsePort(val); err == nil {
+ s.peer.Port = int16(port)
+ return
+ }
+ }
+ if val, ok := value.(uint16); ok {
+ s.peer.Port = int16(val)
+ return
+ }
+ if val, ok := value.(int); ok {
+ s.peer.Port = int16(val)
+ }
+}
+
+func setPeerService(s *zipkinSpan, value interface{}) {
+ if val, ok := value.(string); ok {
+ s.peer.ServiceName = val
+ }
+}
+
+func removeTag(s *zipkinSpan, value interface{}) {}
+
+func (s *zipkinSpan) peerDefined() bool {
+ return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
+}
+
+func (s *zipkinSpan) isRPC() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
+}
+
+func (s *zipkinSpan) isRPCClient() bool {
+ s.RLock()
+ defer s.RUnlock()
+ return s.spanKind == string(ext.SpanKindRPCClientEnum)
+}
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
new file mode 100644
index 0000000..2a6a43e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Counter tracks the number of times an event has occurred
+type Counter interface {
+ // Inc adds the given value to the counter.
+ Inc(int64)
+}
+
+// NullCounter is a counter that does nothing.
+var NullCounter Counter = nullCounter{}
+
+type nullCounter struct{}
+
+func (nullCounter) Inc(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
new file mode 100644
index 0000000..0ead061
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// NSOptions defines the name and tags map associated with a factory namespace
+type NSOptions struct {
+ Name string
+ Tags map[string]string
+}
+
+// Options defines the information associated with a metric
+type Options struct {
+ Name string
+ Tags map[string]string
+ Help string
+}
+
+// TimerOptions defines the information associated with a timer metric, including its duration buckets.
+type TimerOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []time.Duration
+}
+
+// HistogramOptions defines the information associated with a histogram metric, including its value buckets.
+type HistogramOptions struct {
+ Name string
+ Tags map[string]string
+ Help string
+ Buckets []float64
+}
+
+// Factory creates new metrics
+type Factory interface {
+ Counter(metric Options) Counter
+ Timer(metric TimerOptions) Timer
+ Gauge(metric Options) Gauge
+ Histogram(metric HistogramOptions) Histogram
+
+ // Namespace returns a nested metrics factory.
+ Namespace(scope NSOptions) Factory
+}
+
+// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
+var NullFactory Factory = nullFactory{}
+
+type nullFactory struct{}
+
+func (nullFactory) Counter(options Options) Counter {
+ return NullCounter
+}
+func (nullFactory) Timer(options TimerOptions) Timer {
+ return NullTimer
+}
+func (nullFactory) Gauge(options Options) Gauge {
+ return NullGauge
+}
+func (nullFactory) Histogram(options HistogramOptions) Histogram {
+ return NullHistogram
+}
+func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
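(For reference, a minimal sketch of how this Factory interface is typically consumed. NullFactory stands in for a real backend so the snippet is self-contained; the namespace and metric names are illustrative only.)

```go
package main

import "github.com/uber/jaeger-lib/metrics"

func main() {
	// Derive a nested factory; a real backend would prefix and tag the
	// metrics it creates with the namespace options.
	f := metrics.NullFactory.Namespace(metrics.NSOptions{
		Name: "svc",
		Tags: map[string]string{"env": "dev"},
	})
	c := f.Counter(metrics.Options{Name: "requests", Help: "Total requests served"})
	c.Inc(1) // a no-op under NullFactory; a real factory would record the increment
}
```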
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
new file mode 100644
index 0000000..3c60639
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Gauge records an instantaneous measurement of something as an int64 value
+type Gauge interface {
+ // Update the gauge to the value passed in.
+ Update(int64)
+}
+
+// NullGauge is a gauge that does nothing.
+var NullGauge Gauge = nullGauge{}
+
+type nullGauge struct{}
+
+func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
new file mode 100644
index 0000000..d3bd617
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Jaeger Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Histogram keeps track of a distribution of values.
+type Histogram interface {
+ // Records the value passed in.
+ Record(float64)
+}
+
+// NullHistogram is a histogram that does nothing.
+var NullHistogram Histogram = nullHistogram{}
+
+type nullHistogram struct{}
+
+func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
new file mode 100644
index 0000000..c24445a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "sort"
+)
+
+// GetKey converts name+tags into a single string of the form
+// "name|tag1=value1|...|tagN=valueN" (shown here with "|" and "=" as the
+// tagsSep and tagKVSep separators), where tag names are sorted alphabetically.
+func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
+ keys := make([]string, 0, len(tags))
+ for k := range tags {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ key := name
+ for _, k := range keys {
+ key = key + tagsSep + k + tagKVSep + tags[k]
+ }
+ return key
+}
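(A quick illustration of the key format produced above, assuming the common "|" and "=" separators; the name and tags are made up.)

```go
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	key := metrics.GetKey("requests",
		map[string]string{"zone": "us", "env": "prod"}, "|", "=")
	fmt.Println(key) // requests|env=prod|zone=us (tag names sorted alphabetically)
}
```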
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
new file mode 100644
index 0000000..0df0c66
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// MustInit initializes the fields of the passed-in metrics struct using the passed-in factory.
+//
+// It uses reflection to initialize a struct containing metrics fields
+// by assigning new Counter/Gauge/Timer values with the metric name retrieved
+// from the `metric` tag and stats tags retrieved from the `tags` tag.
+//
+// Note: all fields of the struct must be exported, have a `metric` tag, and be
+// of type Counter, Gauge, Timer, or Histogram.
+//
+// Errors during Init lead to a panic.
+func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+ if err := Init(metrics, factory, globalTags); err != nil {
+ panic(err.Error())
+ }
+}
+
+// Init does the same as MustInit, but returns an error instead of
+// panicking.
+func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+ // Allow user to opt out of reporting metrics by passing in nil.
+ if factory == nil {
+ factory = NullFactory
+ }
+
+ counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
+ gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
+ timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
+ histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
+
+ v := reflect.ValueOf(m).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ tags := make(map[string]string)
+ for k, v := range globalTags {
+ tags[k] = v
+ }
+ var buckets []float64
+ field := t.Field(i)
+ metric := field.Tag.Get("metric")
+ if metric == "" {
+ return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
+ }
+ if tagString := field.Tag.Get("tags"); tagString != "" {
+ tagPairs := strings.Split(tagString, ",")
+ for _, tagPair := range tagPairs {
+ tag := strings.Split(tagPair, "=")
+ if len(tag) != 2 {
+ return fmt.Errorf(
+ "Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
+ field.Name, tagPair, tagString)
+ }
+ tags[tag[0]] = tag[1]
+ }
+ }
+ if bucketString := field.Tag.Get("buckets"); bucketString != "" {
+ if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Parse timer duration buckets
+ return fmt.Errorf(
+ "Field [%s]: Buckets are not currently initialized for timer metrics",
+ field.Name)
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ bucketValues := strings.Split(bucketString, ",")
+ for _, bucket := range bucketValues {
+ b, err := strconv.ParseFloat(bucket, 64)
+ if err != nil {
+ return fmt.Errorf(
+ "Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
+ field.Name, bucket, bucketString)
+ }
+ buckets = append(buckets, b)
+ }
+ } else {
+ return fmt.Errorf(
+ "Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
+ field.Name)
+ }
+ }
+ help := field.Tag.Get("help")
+ var obj interface{}
+ if field.Type.AssignableTo(counterPtrType) {
+ obj = factory.Counter(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(gaugePtrType) {
+ obj = factory.Gauge(Options{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(timerPtrType) {
+ // TODO: Add buckets once parsed (see TODO above)
+ obj = factory.Timer(TimerOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ })
+ } else if field.Type.AssignableTo(histogramPtrType) {
+ obj = factory.Histogram(HistogramOptions{
+ Name: metric,
+ Tags: tags,
+ Help: help,
+ Buckets: buckets,
+ })
+ } else {
+ return fmt.Errorf(
+ "Field %s is not a pointer to timer, gauge, or counter",
+ field.Name)
+ }
+ v.Field(i).Set(reflect.ValueOf(obj))
+ }
+ return nil
+}
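(To make the tag-driven initialization above concrete, here is a minimal sketch of a struct that Init/MustInit can populate. The struct, field names, metric names, and buckets are hypothetical.)

```go
package main

import "github.com/uber/jaeger-lib/metrics"

// queueMetrics is a hypothetical bundle: every field is exported, carries a
// `metric` tag, and is one of the four supported interface types.
type queueMetrics struct {
	Jobs    metrics.Counter   `metric:"jobs" tags:"kind=async" help:"Jobs submitted"`
	Depth   metrics.Gauge     `metric:"queue_depth"`
	Latency metrics.Histogram `metric:"latency_seconds" buckets:"0.1,0.5,1"`
}

func main() {
	m := &queueMetrics{}
	// A nil factory falls back to NullFactory, so this particular call cannot panic.
	metrics.MustInit(m, nil, map[string]string{"service": "demo"})
	m.Jobs.Inc(1)
	m.Depth.Update(3)
	m.Latency.Record(0.42)
}
```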
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
new file mode 100644
index 0000000..4a8abdb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// StartStopwatch begins recording the execution time of an event, returning
+// a Stopwatch that should be used to stop recording the time for that
+// event. Multiple events can occur simultaneously, each represented by a
+// different active Stopwatch.
+func StartStopwatch(timer Timer) Stopwatch {
+ return Stopwatch{t: timer, start: time.Now()}
+}
+
+// A Stopwatch tracks the execution time of a specific event
+type Stopwatch struct {
+ t Timer
+ start time.Time
+}
+
+// Stop stops the stopwatch and records the elapsed time.
+func (s Stopwatch) Stop() {
+ s.t.Record(s.ElapsedTime())
+}
+
+// ElapsedTime returns the time elapsed since the stopwatch was started, as a time.Duration.
+func (s Stopwatch) ElapsedTime() time.Duration {
+ return time.Since(s.start)
+}
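(Typical stopwatch usage, sketched with NullTimer so it runs standalone; a real Timer implementation would receive the recorded duration.)

```go
package main

import (
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	sw := metrics.StartStopwatch(metrics.NullTimer)
	time.Sleep(10 * time.Millisecond) // stand-in for the work being measured
	sw.Stop()                         // records the elapsed time on the timer
}
```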
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
new file mode 100644
index 0000000..e18d222
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+ "time"
+)
+
+// Timer accumulates observations about how long some operation took,
+// and also maintains a histogram of percentiles.
+type Timer interface {
+ // Records the time passed in.
+ Record(time.Duration)
+}
+
+// NullTimer is a timer that does nothing.
+var NullTimer Timer = nullTimer{}
+
+type nullTimer struct{}
+
+func (nullTimer) Record(time.Duration) {}
diff --git a/vendor/go.uber.org/atomic/.codecov.yml b/vendor/go.uber.org/atomic/.codecov.yml
new file mode 100644
index 0000000..571116c
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.codecov.yml
@@ -0,0 +1,19 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
+# Also update COVER_IGNORE_PKGS in the Makefile.
+ignore:
+ - /internal/gen-atomicint/
+ - /internal/gen-valuewrapper/
diff --git a/vendor/go.uber.org/atomic/.gitignore b/vendor/go.uber.org/atomic/.gitignore
new file mode 100644
index 0000000..c3fa253
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.gitignore
@@ -0,0 +1,12 @@
+/bin
+.DS_Store
+/vendor
+cover.html
+cover.out
+lint.log
+
+# Binaries
+*.test
+
+# Profiling output
+*.prof
diff --git a/vendor/go.uber.org/atomic/.travis.yml b/vendor/go.uber.org/atomic/.travis.yml
new file mode 100644
index 0000000..13d0a4f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/.travis.yml
@@ -0,0 +1,27 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/atomic
+
+env:
+ global:
+ - GO111MODULE=on
+
+matrix:
+ include:
+ - go: oldstable
+ - go: stable
+ env: LINT=1
+
+cache:
+ directories:
+ - vendor
+
+before_install:
+ - go version
+
+script:
+ - test -z "$LINT" || make lint
+ - make cover
+
+after_success:
+ - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/atomic/CHANGELOG.md b/vendor/go.uber.org/atomic/CHANGELOG.md
new file mode 100644
index 0000000..24c0274
--- /dev/null
+++ b/vendor/go.uber.org/atomic/CHANGELOG.md
@@ -0,0 +1,76 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [1.7.0] - 2020-09-14
+### Added
+- Support JSON serialization and deserialization of primitive atomic types.
+- Support Text marshalling and unmarshalling for string atomics.
+
+### Changed
+- Disallow incorrect comparison of atomic values in a non-atomic way.
+
+### Removed
+- Remove dependency on `golang.org/x/{lint, tools}`.
+
+## [1.6.0] - 2020-02-24
+### Changed
+- Drop library dependency on `golang.org/x/{lint, tools}`.
+
+## [1.5.1] - 2019-11-19
+### Fixed
+- Fix bug where `Bool.CAS` and `Bool.Toggle` do not work correctly together,
+  causing `CAS` to fail even though the old value matches.
+
+## [1.5.0] - 2019-10-29
+### Changed
+- With Go modules, only the `go.uber.org/atomic` import path is supported now.
+ If you need to use the old import path, please add a `replace` directive to
+ your `go.mod`.
+
+## [1.4.0] - 2019-05-01
+### Added
+ - Add `atomic.Error` type for atomic operations on `error` values.
+
+## [1.3.2] - 2018-05-02
+### Added
+- Add `atomic.Duration` type for atomic operations on `time.Duration` values.
+
+## [1.3.1] - 2017-11-14
+### Fixed
+- Revert optimization for `atomic.String.Store("")` which caused data races.
+
+## [1.3.0] - 2017-11-13
+### Added
+- Add `atomic.Bool.CAS` for compare-and-swap semantics on bools.
+
+### Changed
+- Optimize `atomic.String.Store("")` by avoiding an allocation.
+
+## [1.2.0] - 2017-04-12
+### Added
+- Shadow `atomic.Value` from `sync/atomic`.
+
+## [1.1.0] - 2017-03-10
+### Added
+- Add atomic `Float64` type.
+
+### Changed
+- Support new `go.uber.org/atomic` import path.
+
+## [1.0.0] - 2016-07-18
+
+- Initial release.
+
+[1.7.0]: https://github.com/uber-go/atomic/compare/v1.6.0...v1.7.0
+[1.6.0]: https://github.com/uber-go/atomic/compare/v1.5.1...v1.6.0
+[1.5.1]: https://github.com/uber-go/atomic/compare/v1.5.0...v1.5.1
+[1.5.0]: https://github.com/uber-go/atomic/compare/v1.4.0...v1.5.0
+[1.4.0]: https://github.com/uber-go/atomic/compare/v1.3.2...v1.4.0
+[1.3.2]: https://github.com/uber-go/atomic/compare/v1.3.1...v1.3.2
+[1.3.1]: https://github.com/uber-go/atomic/compare/v1.3.0...v1.3.1
+[1.3.0]: https://github.com/uber-go/atomic/compare/v1.2.0...v1.3.0
+[1.2.0]: https://github.com/uber-go/atomic/compare/v1.1.0...v1.2.0
+[1.1.0]: https://github.com/uber-go/atomic/compare/v1.0.0...v1.1.0
+[1.0.0]: https://github.com/uber-go/atomic/releases/tag/v1.0.0
diff --git a/vendor/go.uber.org/atomic/LICENSE.txt b/vendor/go.uber.org/atomic/LICENSE.txt
new file mode 100644
index 0000000..8765c9f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/atomic/Makefile b/vendor/go.uber.org/atomic/Makefile
new file mode 100644
index 0000000..1b1376d
--- /dev/null
+++ b/vendor/go.uber.org/atomic/Makefile
@@ -0,0 +1,78 @@
+# Directory to place `go install`ed binaries into.
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+GEN_ATOMICINT = $(GOBIN)/gen-atomicint
+GEN_ATOMICWRAPPER = $(GOBIN)/gen-atomicwrapper
+STATICCHECK = $(GOBIN)/staticcheck
+
+GO_FILES ?= $(shell find . '(' -path .git -o -path vendor ')' -prune -o -name '*.go' -print)
+
+# Also update ignore section in .codecov.yml.
+COVER_IGNORE_PKGS = \
+ go.uber.org/atomic/internal/gen-atomicint \
+ go.uber.org/atomic/internal/gen-atomicwrapper
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" && cat $(FMT_LOG) && false)
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+$(GEN_ATOMICWRAPPER): $(wildcard ./internal/gen-atomicwrapper/*)
+ go build -o $@ ./internal/gen-atomicwrapper
+
+$(GEN_ATOMICINT): $(wildcard ./internal/gen-atomicint/*)
+ go build -o $@ ./internal/gen-atomicint
+
+.PHONY: golint
+golint: $(GOLINT)
+ $(GOLINT) ./...
+
+.PHONY: staticcheck
+staticcheck: $(STATICCHECK)
+ $(STATICCHECK) ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck generatenodirty
+
+# comma separated list of packages to consider for code coverage.
+COVER_PKG = $(shell \
+ go list -find ./... | \
+ grep -v $(foreach pkg,$(COVER_IGNORE_PKGS),-e "^$(pkg)$$") | \
+ paste -sd, -)
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg $(COVER_PKG) -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: generate
+generate: $(GEN_ATOMICINT) $(GEN_ATOMICWRAPPER)
+ go generate ./...
+
+.PHONY: generatenodirty
+generatenodirty:
+ @[ -z "$$(git status --porcelain)" ] || ( \
+ echo "Working tree is dirty. Commit your changes first."; \
+ exit 1 )
+ @make generate
+ @status=$$(git status --porcelain); \
+ [ -z "$$status" ] || ( \
+ echo "Working tree is dirty after `make generate`:"; \
+ echo "$$status"; \
+ echo "Please ensure that the generated code is up-to-date." )
diff --git a/vendor/go.uber.org/atomic/README.md b/vendor/go.uber.org/atomic/README.md
new file mode 100644
index 0000000..ade0c20
--- /dev/null
+++ b/vendor/go.uber.org/atomic/README.md
@@ -0,0 +1,63 @@
+# atomic [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![Go Report Card][reportcard-img]][reportcard]
+
+Simple wrappers for primitive types to enforce atomic access.
+
+## Installation
+
+```shell
+$ go get -u go.uber.org/atomic@v1
+```
+
+### Legacy Import Path
+
+As of v1.5.0, the import path `go.uber.org/atomic` is the only supported way
+of using this package. If you are using Go modules, this package will fail to
+compile with the legacy import path `github.com/uber-go/atomic`.
+
+We recommend migrating your code to the new import path, but if you're unable
+to do so, or if your dependencies are still using the old import path, you
+will have to add a `replace` directive to your `go.mod` file downgrading the
+legacy import path to an older version.
+
+```
+replace github.com/uber-go/atomic => github.com/uber-go/atomic v1.4.0
+```
+
+You can do so automatically by running the following command.
+
+```shell
+$ go mod edit -replace github.com/uber-go/atomic=github.com/uber-go/atomic@v1.4.0
+```
+
+## Usage
+
+The standard library's `sync/atomic` is powerful, but it's easy to forget which
+variables must be accessed atomically. `go.uber.org/atomic` preserves all the
+functionality of the standard library, but wraps the primitive types to
+provide a safer, more convenient API.
+
+```go
+var atom atomic.Uint32
+atom.Store(42)
+atom.Sub(2)
+atom.CAS(40, 11)
+```
+
+See the [documentation][doc] for a complete API specification.
+
+## Development Status
+
+Stable.
+
+---
+
+Released under the [MIT License](LICENSE.txt).
+
+[doc-img]: https://godoc.org/github.com/uber-go/atomic?status.svg
+[doc]: https://godoc.org/go.uber.org/atomic
+[ci-img]: https://travis-ci.com/uber-go/atomic.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/atomic
+[cov-img]: https://codecov.io/gh/uber-go/atomic/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/atomic
+[reportcard-img]: https://goreportcard.com/badge/go.uber.org/atomic
+[reportcard]: https://goreportcard.com/report/go.uber.org/atomic
diff --git a/vendor/go.uber.org/atomic/bool.go b/vendor/go.uber.org/atomic/bool.go
new file mode 100644
index 0000000..9cf1914
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool.go
@@ -0,0 +1,81 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+)
+
+// Bool is an atomic type-safe wrapper for bool values.
+type Bool struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint32
+}
+
+var _zeroBool bool
+
+// NewBool creates a new Bool.
+func NewBool(v bool) *Bool {
+ x := &Bool{}
+ if v != _zeroBool {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped bool.
+func (x *Bool) Load() bool {
+ return truthy(x.v.Load())
+}
+
+// Store atomically stores the passed bool.
+func (x *Bool) Store(v bool) {
+ x.v.Store(boolToInt(v))
+}
+
+// CAS is an atomic compare-and-swap for bool values.
+func (x *Bool) CAS(o, n bool) bool {
+ return x.v.CAS(boolToInt(o), boolToInt(n))
+}
+
+// Swap atomically stores the given bool and returns the old
+// value.
+func (x *Bool) Swap(o bool) bool {
+ return truthy(x.v.Swap(boolToInt(o)))
+}
+
+// MarshalJSON encodes the wrapped bool into JSON.
+func (x *Bool) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a bool from JSON.
+func (x *Bool) UnmarshalJSON(b []byte) error {
+ var v bool
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/bool_ext.go b/vendor/go.uber.org/atomic/bool_ext.go
new file mode 100644
index 0000000..c7bf7a8
--- /dev/null
+++ b/vendor/go.uber.org/atomic/bool_ext.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "strconv"
+)
+
+//go:generate bin/gen-atomicwrapper -name=Bool -type=bool -wrapped=Uint32 -pack=boolToInt -unpack=truthy -cas -swap -json -file=bool.go
+
+func truthy(n uint32) bool {
+ return n == 1
+}
+
+func boolToInt(b bool) uint32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// Toggle atomically negates the Boolean and returns the previous value.
+func (b *Bool) Toggle() bool {
+ for {
+ old := b.Load()
+ if b.CAS(old, !old) {
+ return old
+ }
+ }
+}
+
+// String encodes the wrapped value as a string.
+func (b *Bool) String() string {
+ return strconv.FormatBool(b.Load())
+}
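(A small usage sketch of the Toggle and CAS semantics above; the values are illustrative.)

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	b := atomic.NewBool(false)
	prev := b.Toggle()              // retries CAS until the flip lands
	fmt.Println(prev, b.Load())     // false true
	fmt.Println(b.CAS(true, false)) // true: the old value matched, b is now false
}
```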
diff --git a/vendor/go.uber.org/atomic/doc.go b/vendor/go.uber.org/atomic/doc.go
new file mode 100644
index 0000000..ae7390e
--- /dev/null
+++ b/vendor/go.uber.org/atomic/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package atomic provides simple wrappers around numerics to enforce atomic
+// access.
+package atomic
diff --git a/vendor/go.uber.org/atomic/duration.go b/vendor/go.uber.org/atomic/duration.go
new file mode 100644
index 0000000..027cfcb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration.go
@@ -0,0 +1,82 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// Duration is an atomic type-safe wrapper for time.Duration values.
+type Duration struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Int64
+}
+
+var _zeroDuration time.Duration
+
+// NewDuration creates a new Duration.
+func NewDuration(v time.Duration) *Duration {
+ x := &Duration{}
+ if v != _zeroDuration {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped time.Duration.
+func (x *Duration) Load() time.Duration {
+ return time.Duration(x.v.Load())
+}
+
+// Store atomically stores the passed time.Duration.
+func (x *Duration) Store(v time.Duration) {
+ x.v.Store(int64(v))
+}
+
+// CAS is an atomic compare-and-swap for time.Duration values.
+func (x *Duration) CAS(o, n time.Duration) bool {
+ return x.v.CAS(int64(o), int64(n))
+}
+
+// Swap atomically stores the given time.Duration and returns the old
+// value.
+func (x *Duration) Swap(o time.Duration) time.Duration {
+ return time.Duration(x.v.Swap(int64(o)))
+}
+
+// MarshalJSON encodes the wrapped time.Duration into JSON.
+func (x *Duration) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a time.Duration from JSON.
+func (x *Duration) UnmarshalJSON(b []byte) error {
+ var v time.Duration
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/duration_ext.go b/vendor/go.uber.org/atomic/duration_ext.go
new file mode 100644
index 0000000..6273b66
--- /dev/null
+++ b/vendor/go.uber.org/atomic/duration_ext.go
@@ -0,0 +1,40 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "time"
+
+//go:generate bin/gen-atomicwrapper -name=Duration -type=time.Duration -wrapped=Int64 -pack=int64 -unpack=time.Duration -cas -swap -json -imports time -file=duration.go
+
+// Add atomically adds to the wrapped time.Duration and returns the new value.
+func (d *Duration) Add(n time.Duration) time.Duration {
+ return time.Duration(d.v.Add(int64(n)))
+}
+
+// Sub atomically subtracts from the wrapped time.Duration and returns the new value.
+func (d *Duration) Sub(n time.Duration) time.Duration {
+ return time.Duration(d.v.Sub(int64(n)))
+}
+
+// String encodes the wrapped value as a string.
+func (d *Duration) String() string {
+ return d.Load().String()
+}
diff --git a/vendor/go.uber.org/atomic/error.go b/vendor/go.uber.org/atomic/error.go
new file mode 100644
index 0000000..a6166fb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error.go
@@ -0,0 +1,51 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// Error is an atomic type-safe wrapper for error values.
+type Error struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroError error
+
+// NewError creates a new Error.
+func NewError(v error) *Error {
+ x := &Error{}
+ if v != _zeroError {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped error.
+func (x *Error) Load() error {
+ return unpackError(x.v.Load())
+}
+
+// Store atomically stores the passed error.
+func (x *Error) Store(v error) {
+ x.v.Store(packError(v))
+}
diff --git a/vendor/go.uber.org/atomic/error_ext.go b/vendor/go.uber.org/atomic/error_ext.go
new file mode 100644
index 0000000..ffe0be2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/error_ext.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// atomic.Value panics on nil inputs, or if the underlying type changes.
+// Stabilize by always storing a custom struct that we control.
+
+//go:generate bin/gen-atomicwrapper -name=Error -type=error -wrapped=Value -pack=packError -unpack=unpackError -file=error.go
+
+type packedError struct{ Value error }
+
+func packError(v error) interface{} {
+ return packedError{v}
+}
+
+func unpackError(v interface{}) error {
+ if err, ok := v.(packedError); ok {
+ return err.Value
+ }
+ return nil
+}
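(The pack/unpack indirection above is what makes storing nil safe; a brief sketch, with io.EOF as a convenient stand-in error.)

```go
package main

import (
	"fmt"
	"io"

	"go.uber.org/atomic"
)

func main() {
	var e atomic.Error
	e.Store(nil)          // safe: nil is wrapped in a packedError before Store
	e.Store(io.EOF)       // the stored concrete type never changes, so Value never panics
	fmt.Println(e.Load()) // EOF
}
```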
diff --git a/vendor/go.uber.org/atomic/float64.go b/vendor/go.uber.org/atomic/float64.go
new file mode 100644
index 0000000..0719060
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64.go
@@ -0,0 +1,76 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "math"
+)
+
+// Float64 is an atomic type-safe wrapper for float64 values.
+type Float64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Uint64
+}
+
+var _zeroFloat64 float64
+
+// NewFloat64 creates a new Float64.
+func NewFloat64(v float64) *Float64 {
+ x := &Float64{}
+ if v != _zeroFloat64 {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped float64.
+func (x *Float64) Load() float64 {
+ return math.Float64frombits(x.v.Load())
+}
+
+// Store atomically stores the passed float64.
+func (x *Float64) Store(v float64) {
+ x.v.Store(math.Float64bits(v))
+}
+
+// CAS is an atomic compare-and-swap for float64 values.
+func (x *Float64) CAS(o, n float64) bool {
+ return x.v.CAS(math.Float64bits(o), math.Float64bits(n))
+}
+
+// MarshalJSON encodes the wrapped float64 into JSON.
+func (x *Float64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(x.Load())
+}
+
+// UnmarshalJSON decodes a float64 from JSON.
+func (x *Float64) UnmarshalJSON(b []byte) error {
+ var v float64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ x.Store(v)
+ return nil
+}
diff --git a/vendor/go.uber.org/atomic/float64_ext.go b/vendor/go.uber.org/atomic/float64_ext.go
new file mode 100644
index 0000000..927b1ad
--- /dev/null
+++ b/vendor/go.uber.org/atomic/float64_ext.go
@@ -0,0 +1,47 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "strconv"
+
+//go:generate bin/gen-atomicwrapper -name=Float64 -type=float64 -wrapped=Uint64 -pack=math.Float64bits -unpack=math.Float64frombits -cas -json -imports math -file=float64.go
+
+// Add atomically adds to the wrapped float64 and returns the new value.
+func (f *Float64) Add(s float64) float64 {
+ for {
+ old := f.Load()
+ new := old + s
+ if f.CAS(old, new) {
+ return new
+ }
+ }
+}
+
+// Sub atomically subtracts from the wrapped float64 and returns the new value.
+func (f *Float64) Sub(s float64) float64 {
+ return f.Add(-s)
+}
+
+// String encodes the wrapped value as a string.
+func (f *Float64) String() string {
+ // 'g' is the behavior for floats with %v.
+ return strconv.FormatFloat(f.Load(), 'g', -1, 64)
+}
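(The CAS retry loop in Add gives lock-free accumulation of floats; a short usage sketch with made-up values.)

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	f := atomic.NewFloat64(1.5)
	f.Add(2.25) // loops on CAS until the update is applied
	f.Sub(0.75)
	fmt.Println(f.String()) // 3
}
```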
diff --git a/vendor/go.uber.org/atomic/gen.go b/vendor/go.uber.org/atomic/gen.go
new file mode 100644
index 0000000..50d6b24
--- /dev/null
+++ b/vendor/go.uber.org/atomic/gen.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicint -name=Int32 -wrapped=int32 -file=int32.go
+//go:generate bin/gen-atomicint -name=Int64 -wrapped=int64 -file=int64.go
+//go:generate bin/gen-atomicint -name=Uint32 -wrapped=uint32 -unsigned -file=uint32.go
+//go:generate bin/gen-atomicint -name=Uint64 -wrapped=uint64 -unsigned -file=uint64.go
diff --git a/vendor/go.uber.org/atomic/go.mod b/vendor/go.uber.org/atomic/go.mod
new file mode 100644
index 0000000..daa7599
--- /dev/null
+++ b/vendor/go.uber.org/atomic/go.mod
@@ -0,0 +1,8 @@
+module go.uber.org/atomic
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/stretchr/testify v1.3.0
+)
+
+go 1.13
diff --git a/vendor/go.uber.org/atomic/go.sum b/vendor/go.uber.org/atomic/go.sum
new file mode 100644
index 0000000..4f76e62
--- /dev/null
+++ b/vendor/go.uber.org/atomic/go.sum
@@ -0,0 +1,9 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/go.uber.org/atomic/int32.go b/vendor/go.uber.org/atomic/int32.go
new file mode 100644
index 0000000..18ae564
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int32 is an atomic wrapper around int32.
+type Int32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int32
+}
+
+// NewInt32 creates a new Int32.
+func NewInt32(i int32) *Int32 {
+ return &Int32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int32) Load() int32 {
+ return atomic.LoadInt32(&i.v)
+}
+
+// Add atomically adds to the wrapped int32 and returns the new value.
+func (i *Int32) Add(n int32) int32 {
+ return atomic.AddInt32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int32 and returns the new value.
+func (i *Int32) Sub(n int32) int32 {
+ return atomic.AddInt32(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int32 and returns the new value.
+func (i *Int32) Inc() int32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int32 and returns the new value.
+func (i *Int32) Dec() int32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int32) CAS(old, new int32) bool {
+ return atomic.CompareAndSwapInt32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int32) Store(n int32) {
+ atomic.StoreInt32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int32 and returns the old value.
+func (i *Int32) Swap(n int32) int32 {
+ return atomic.SwapInt32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int32 into JSON.
+func (i *Int32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int32.
+func (i *Int32) UnmarshalJSON(b []byte) error {
+ var v int32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int32) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
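For reference, a minimal sketch (not part of the vendored file) of how the `Int32` wrapper replaces raw `sync/atomic` calls on a bare field; the `requests` counter is purely illustrative:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	// The wrapper keeps every access atomic without callers having to
	// remember to route each read and write through sync/atomic themselves.
	requests := atomic.NewInt32(0)

	requests.Inc()      // 1
	requests.Add(4)     // 5
	requests.Sub(2)     // 3
	requests.CAS(3, 10) // succeeds only because the current value is 3
	fmt.Println(requests.Load()) // 10
}
```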
diff --git a/vendor/go.uber.org/atomic/int64.go b/vendor/go.uber.org/atomic/int64.go
new file mode 100644
index 0000000..2bcbbfa
--- /dev/null
+++ b/vendor/go.uber.org/atomic/int64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Int64 is an atomic wrapper around int64.
+type Int64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v int64
+}
+
+// NewInt64 creates a new Int64.
+func NewInt64(i int64) *Int64 {
+ return &Int64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Int64) Load() int64 {
+ return atomic.LoadInt64(&i.v)
+}
+
+// Add atomically adds to the wrapped int64 and returns the new value.
+func (i *Int64) Add(n int64) int64 {
+ return atomic.AddInt64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped int64 and returns the new value.
+func (i *Int64) Sub(n int64) int64 {
+ return atomic.AddInt64(&i.v, -n)
+}
+
+// Inc atomically increments the wrapped int64 and returns the new value.
+func (i *Int64) Inc() int64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped int64 and returns the new value.
+func (i *Int64) Dec() int64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Int64) CAS(old, new int64) bool {
+ return atomic.CompareAndSwapInt64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Int64) Store(n int64) {
+ atomic.StoreInt64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped int64 and returns the old value.
+func (i *Int64) Swap(n int64) int64 {
+ return atomic.SwapInt64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped int64 into JSON.
+func (i *Int64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped int64.
+func (i *Int64) UnmarshalJSON(b []byte) error {
+ var v int64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Int64) String() string {
+ v := i.Load()
+ return strconv.FormatInt(int64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/nocmp.go b/vendor/go.uber.org/atomic/nocmp.go
new file mode 100644
index 0000000..a8201cb
--- /dev/null
+++ b/vendor/go.uber.org/atomic/nocmp.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// nocmp is an uncomparable struct. Embed this inside another struct to make
+// it uncomparable.
+//
+// type Foo struct {
+// nocmp
+// // ...
+// }
+//
+// This DOES NOT:
+//
+// - Disallow shallow copies of structs
+// - Disallow comparison of pointers to uncomparable structs
+type nocmp [0]func()
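The `[0]func()` trick above is worth spelling out: func values are not comparable in Go, so embedding a zero-length array of them makes the enclosing struct uncomparable at compile time while occupying zero bytes. A standalone sketch (the `Counter` type is hypothetical):

```go
package main

import (
	"fmt"
	"unsafe"
)

// nocmp mirrors the vendored guard: a zero-length array of func values.
type nocmp [0]func()

// Counter cannot be compared with == because of the embedded guard.
type Counter struct {
	_ nocmp
	n int64
}

func main() {
	a, b := &Counter{n: 1}, &Counter{n: 1}

	// Comparing pointers is still legal, as the vendored doc comment
	// notes; comparing values is not:
	//   _ = *a == *b // compile error: struct containing [0]func() cannot be compared
	fmt.Println(a == b)                   // false: distinct allocations
	fmt.Println(unsafe.Sizeof(Counter{})) // 8: the guard adds no size
}
```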
diff --git a/vendor/go.uber.org/atomic/string.go b/vendor/go.uber.org/atomic/string.go
new file mode 100644
index 0000000..225b7a2
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string.go
@@ -0,0 +1,54 @@
+// @generated Code generated by gen-atomicwrapper.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+// String is an atomic type-safe wrapper for string values.
+type String struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v Value
+}
+
+var _zeroString string
+
+// NewString creates a new String.
+func NewString(v string) *String {
+ x := &String{}
+ if v != _zeroString {
+ x.Store(v)
+ }
+ return x
+}
+
+// Load atomically loads the wrapped string.
+func (x *String) Load() string {
+ if v := x.v.Load(); v != nil {
+ return v.(string)
+ }
+ return _zeroString
+}
+
+// Store atomically stores the passed string.
+func (x *String) Store(v string) {
+ x.v.Store(v)
+}
diff --git a/vendor/go.uber.org/atomic/string_ext.go b/vendor/go.uber.org/atomic/string_ext.go
new file mode 100644
index 0000000..3a95582
--- /dev/null
+++ b/vendor/go.uber.org/atomic/string_ext.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+//go:generate bin/gen-atomicwrapper -name=String -type=string -wrapped=Value -file=string.go
+
+// String returns the wrapped value.
+func (s *String) String() string {
+ return s.Load()
+}
+
+// MarshalText encodes the wrapped string into a textual form.
+//
+// This makes it encodable as JSON, YAML, XML, and more.
+func (s *String) MarshalText() ([]byte, error) {
+ return []byte(s.Load()), nil
+}
+
+// UnmarshalText decodes text and replaces the wrapped string with it.
+//
+// This makes it decodable from JSON, YAML, XML, and more.
+func (s *String) UnmarshalText(b []byte) error {
+ s.Store(string(b))
+ return nil
+}
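Taken together, string.go and string_ext.go give a store/load API plus text marshaling. A short illustrative sketch (the anonymous struct and its field names are assumptions, not from the tree):

```go
package main

import (
	"encoding/json"
	"fmt"

	"go.uber.org/atomic"
)

func main() {
	s := atomic.NewString("initial")
	s.Store("updated")
	fmt.Println(s.Load()) // updated

	// Because *String implements encoding.TextMarshaler (string_ext.go),
	// it JSON-encodes as a plain string when embedded in a struct.
	out, err := json.Marshal(struct {
		State *atomic.String `json:"state"`
	}{State: s})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"state":"updated"}
}
```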
diff --git a/vendor/go.uber.org/atomic/uint32.go b/vendor/go.uber.org/atomic/uint32.go
new file mode 100644
index 0000000..a973aba
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint32.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint32 is an atomic wrapper around uint32.
+type Uint32 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint32
+}
+
+// NewUint32 creates a new Uint32.
+func NewUint32(i uint32) *Uint32 {
+ return &Uint32{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint32) Load() uint32 {
+ return atomic.LoadUint32(&i.v)
+}
+
+// Add atomically adds to the wrapped uint32 and returns the new value.
+func (i *Uint32) Add(n uint32) uint32 {
+ return atomic.AddUint32(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint32 and returns the new value.
+func (i *Uint32) Sub(n uint32) uint32 {
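+ // sync/atomic has no SubUint32; adding the two's complement ^(n - 1)
+ // (equal to -n modulo 2^32) subtracts n.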
+ return atomic.AddUint32(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint32 and returns the new value.
+func (i *Uint32) Inc() uint32 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint32 and returns the new value.
+func (i *Uint32) Dec() uint32 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint32) CAS(old, new uint32) bool {
+ return atomic.CompareAndSwapUint32(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint32) Store(n uint32) {
+ atomic.StoreUint32(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint32 and returns the old value.
+func (i *Uint32) Swap(n uint32) uint32 {
+ return atomic.SwapUint32(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint32 into JSON.
+func (i *Uint32) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint32.
+func (i *Uint32) UnmarshalJSON(b []byte) error {
+ var v uint32
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint32) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/uint64.go b/vendor/go.uber.org/atomic/uint64.go
new file mode 100644
index 0000000..3b6c71f
--- /dev/null
+++ b/vendor/go.uber.org/atomic/uint64.go
@@ -0,0 +1,102 @@
+// @generated Code generated by gen-atomicint.
+
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import (
+ "encoding/json"
+ "strconv"
+ "sync/atomic"
+)
+
+// Uint64 is an atomic wrapper around uint64.
+type Uint64 struct {
+ _ nocmp // disallow non-atomic comparison
+
+ v uint64
+}
+
+// NewUint64 creates a new Uint64.
+func NewUint64(i uint64) *Uint64 {
+ return &Uint64{v: i}
+}
+
+// Load atomically loads the wrapped value.
+func (i *Uint64) Load() uint64 {
+ return atomic.LoadUint64(&i.v)
+}
+
+// Add atomically adds to the wrapped uint64 and returns the new value.
+func (i *Uint64) Add(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, n)
+}
+
+// Sub atomically subtracts from the wrapped uint64 and returns the new value.
+func (i *Uint64) Sub(n uint64) uint64 {
+ return atomic.AddUint64(&i.v, ^(n - 1))
+}
+
+// Inc atomically increments the wrapped uint64 and returns the new value.
+func (i *Uint64) Inc() uint64 {
+ return i.Add(1)
+}
+
+// Dec atomically decrements the wrapped uint64 and returns the new value.
+func (i *Uint64) Dec() uint64 {
+ return i.Sub(1)
+}
+
+// CAS is an atomic compare-and-swap.
+func (i *Uint64) CAS(old, new uint64) bool {
+ return atomic.CompareAndSwapUint64(&i.v, old, new)
+}
+
+// Store atomically stores the passed value.
+func (i *Uint64) Store(n uint64) {
+ atomic.StoreUint64(&i.v, n)
+}
+
+// Swap atomically swaps the wrapped uint64 and returns the old value.
+func (i *Uint64) Swap(n uint64) uint64 {
+ return atomic.SwapUint64(&i.v, n)
+}
+
+// MarshalJSON encodes the wrapped uint64 into JSON.
+func (i *Uint64) MarshalJSON() ([]byte, error) {
+ return json.Marshal(i.Load())
+}
+
+// UnmarshalJSON decodes JSON into the wrapped uint64.
+func (i *Uint64) UnmarshalJSON(b []byte) error {
+ var v uint64
+ if err := json.Unmarshal(b, &v); err != nil {
+ return err
+ }
+ i.Store(v)
+ return nil
+}
+
+// String encodes the wrapped value as a string.
+func (i *Uint64) String() string {
+ v := i.Load()
+ return strconv.FormatUint(uint64(v), 10)
+}
diff --git a/vendor/go.uber.org/atomic/value.go b/vendor/go.uber.org/atomic/value.go
new file mode 100644
index 0000000..671f3a3
--- /dev/null
+++ b/vendor/go.uber.org/atomic/value.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package atomic
+
+import "sync/atomic"
+
+// Value shadows the type of the same name from sync/atomic
+// https://godoc.org/sync/atomic#Value
+type Value struct {
+ atomic.Value
+
+ _ nocmp // disallow non-atomic comparison
+}
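A brief sketch of why the shadowed `Value` exists: it behaves exactly like `sync/atomic.Value`, but the embedded `nocmp` guard turns accidental `==` comparisons into compile errors. The `config` type below is illustrative:

```go
package main

import (
	"fmt"

	"go.uber.org/atomic"
)

// config stands in for the read-mostly snapshot Value is typically used for.
type config struct {
	Endpoint string
}

func main() {
	var v atomic.Value // the zero value is ready to use, as with sync/atomic.Value

	v.Store(config{Endpoint: "http://127.0.0.1:8080"})

	// Load returns interface{} exactly as sync/atomic.Value does; only
	// the comparability guard differs.
	if c, ok := v.Load().(config); ok {
		fmt.Println(c.Endpoint)
	}
}
```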
diff --git a/vendor/go.uber.org/multierr/.codecov.yml b/vendor/go.uber.org/multierr/.codecov.yml
new file mode 100644
index 0000000..6d4d1be
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.codecov.yml
@@ -0,0 +1,15 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 100 # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+
diff --git a/vendor/go.uber.org/multierr/.gitignore b/vendor/go.uber.org/multierr/.gitignore
new file mode 100644
index 0000000..b9a05e3
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.gitignore
@@ -0,0 +1,4 @@
+/vendor
+cover.html
+cover.out
+/bin
diff --git a/vendor/go.uber.org/multierr/.travis.yml b/vendor/go.uber.org/multierr/.travis.yml
new file mode 100644
index 0000000..8636ab4
--- /dev/null
+++ b/vendor/go.uber.org/multierr/.travis.yml
@@ -0,0 +1,23 @@
+sudo: false
+language: go
+go_import_path: go.uber.org/multierr
+
+env:
+ global:
+ - GO111MODULE=on
+
+go:
+ - oldstable
+ - stable
+
+before_install:
+- go version
+
+script:
+- |
+ set -e
+ make lint
+ make cover
+
+after_success:
+- bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/multierr/CHANGELOG.md b/vendor/go.uber.org/multierr/CHANGELOG.md
new file mode 100644
index 0000000..6f1db9e
--- /dev/null
+++ b/vendor/go.uber.org/multierr/CHANGELOG.md
@@ -0,0 +1,60 @@
+Releases
+========
+
+v1.6.0 (2020-09-14)
+===================
+
+- Actually drop library dependency on development-time tooling.
+
+
+v1.5.0 (2020-02-24)
+===================
+
+- Drop library dependency on development-time tooling.
+
+
+v1.4.0 (2019-11-04)
+===================
+
+- Add `AppendInto` function to more ergonomically build errors inside a
+ loop.
+
+
+v1.3.0 (2019-10-29)
+===================
+
+- Switch to Go modules.
+
+
+v1.2.0 (2019-09-26)
+===================
+
+- Support extracting and matching against wrapped errors with `errors.As`
+ and `errors.Is`.
+
+
+v1.1.0 (2017-06-30)
+===================
+
+- Added an `Errors(error) []error` function to extract the underlying list of
+ errors for a multierr error.
+
+
+v1.0.0 (2017-05-31)
+===================
+
+No changes since v0.2.0. This release commits to making no breaking changes
+to the current API in the 1.X series.
+
+
+v0.2.0 (2017-04-11)
+===================
+
+- Repeatedly appending to the same error is now faster due to fewer
+ allocations.
+
+
+v0.1.0 (2017-03-31)
+===================
+
+- Initial release
diff --git a/vendor/go.uber.org/multierr/LICENSE.txt b/vendor/go.uber.org/multierr/LICENSE.txt
new file mode 100644
index 0000000..858e024
--- /dev/null
+++ b/vendor/go.uber.org/multierr/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/multierr/Makefile b/vendor/go.uber.org/multierr/Makefile
new file mode 100644
index 0000000..3160044
--- /dev/null
+++ b/vendor/go.uber.org/multierr/Makefile
@@ -0,0 +1,42 @@
+# Directory to put `go install`ed binaries in.
+export GOBIN ?= $(shell pwd)/bin
+
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: build
+build:
+ go build ./...
+
+.PHONY: test
+test:
+ go test -race ./...
+
+.PHONY: gofmt
+gofmt:
+ $(eval FMT_LOG := $(shell mktemp -t gofmt.XXXXX))
+ @gofmt -e -s -l $(GO_FILES) > $(FMT_LOG) || true
+ @[ ! -s "$(FMT_LOG)" ] || (echo "gofmt failed:" | cat - $(FMT_LOG) && false)
+
+.PHONY: golint
+golint:
+ @cd tools && go install golang.org/x/lint/golint
+ @$(GOBIN)/golint ./...
+
+.PHONY: staticcheck
+staticcheck:
+ @cd tools && go install honnef.co/go/tools/cmd/staticcheck
+ @$(GOBIN)/staticcheck ./...
+
+.PHONY: lint
+lint: gofmt golint staticcheck
+
+.PHONY: cover
+cover:
+ go test -coverprofile=cover.out -coverpkg=./... -v ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: update-license
+update-license:
+ @cd tools && go install go.uber.org/tools/update-license
+ @$(GOBIN)/update-license $(GO_FILES)
diff --git a/vendor/go.uber.org/multierr/README.md b/vendor/go.uber.org/multierr/README.md
new file mode 100644
index 0000000..751bd65
--- /dev/null
+++ b/vendor/go.uber.org/multierr/README.md
@@ -0,0 +1,23 @@
+# multierr [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+`multierr` allows combining one or more Go `error`s together.
+
+## Installation
+
+ go get -u go.uber.org/multierr
+
+## Status
+
+Stable: No breaking changes will be made before 2.0.
+
+-------------------------------------------------------------------------------
+
+Released under the [MIT License].
+
+[MIT License]: LICENSE.txt
+[doc-img]: https://godoc.org/go.uber.org/multierr?status.svg
+[doc]: https://godoc.org/go.uber.org/multierr
+[ci-img]: https://travis-ci.com/uber-go/multierr.svg?branch=master
+[cov-img]: https://codecov.io/gh/uber-go/multierr/branch/master/graph/badge.svg
+[ci]: https://travis-ci.com/uber-go/multierr
+[cov]: https://codecov.io/gh/uber-go/multierr
diff --git a/vendor/go.uber.org/multierr/error.go b/vendor/go.uber.org/multierr/error.go
new file mode 100644
index 0000000..5c9b67d
--- /dev/null
+++ b/vendor/go.uber.org/multierr/error.go
@@ -0,0 +1,449 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package multierr allows combining one or more errors together.
+//
+// Overview
+//
+// Errors can be combined with the use of the Combine function.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// conn.Close(),
+// )
+//
+// If only two errors are being combined, the Append function may be used
+// instead.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// This makes it possible to record resource cleanup failures from deferred
+// blocks with the help of named return values.
+//
+// func sendRequest(req Request) (err error) {
+// conn, err := openConnection()
+// if err != nil {
+// return err
+// }
+// defer func() {
+// err = multierr.Append(err, conn.Close())
+// }()
+// // ...
+// }
+//
+// The underlying list of errors for a returned error object may be retrieved
+// with the Errors function.
+//
+// errors := multierr.Errors(err)
+// if len(errors) > 0 {
+// fmt.Println("The following errors occurred:", errors)
+// }
+//
+// Advanced Usage
+//
+// Errors returned by Combine and Append MAY implement the following
+// interface.
+//
+// type errorGroup interface {
+// // Returns a slice containing the underlying list of errors.
+// //
+// // This slice MUST NOT be modified by the caller.
+// Errors() []error
+// }
+//
+// Note that if you need access to the list of errors behind a multierr
+// error, you should prefer using the Errors function. That said, if you need
+// cheap read-only access to the underlying errors slice, you can attempt a
+// type assertion against this interface. You MUST handle the failure case
+// gracefully because errors returned by Combine and Append are not
+// guaranteed to implement this interface.
+//
+// var errors []error
+// group, ok := err.(errorGroup)
+// if ok {
+// errors = group.Errors()
+// } else {
+// errors = []error{err}
+// }
+package multierr // import "go.uber.org/multierr"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+ "sync"
+
+ "go.uber.org/atomic"
+)
+
+var (
+ // Separator for single-line error messages.
+ _singlelineSeparator = []byte("; ")
+
+ // Prefix for multi-line messages.
+ _multilinePrefix = []byte("the following errors occurred:")
+
+ // Prefix for the first and following lines of an item in a list of
+ // multi-line error messages.
+ //
+ // For example, if a single item is:
+ //
+ // foo
+ // bar
+ //
+ // It will become,
+ //
+ // - foo
+ // bar
+ _multilineSeparator = []byte("\n - ")
+ _multilineIndent = []byte(" ")
+)
+
+// _bufferPool is a pool of bytes.Buffers.
+var _bufferPool = sync.Pool{
+ New: func() interface{} {
+ return &bytes.Buffer{}
+ },
+}
+
+type errorGroup interface {
+ Errors() []error
+}
+
+// Errors returns a slice containing zero or more errors that the supplied
+// error is composed of. If the error is nil, a nil slice is returned.
+//
+// err := multierr.Append(r.Close(), w.Close())
+// errors := multierr.Errors(err)
+//
+// If the error is not composed of other errors, the returned slice contains
+// just the error that was passed in.
+//
+// Callers of this function are free to modify the returned slice.
+func Errors(err error) []error {
+ if err == nil {
+ return nil
+ }
+
+ // Note that we're type-asserting to multiError, not errorGroup. Our contract is
+ // that returned errors MAY implement errorGroup. Errors, however, only
+ // has special behavior for multierr-specific error objects.
+ //
+ // This behavior can be expanded in the future but I think it's prudent to
+ // start with as little as possible in terms of contract and possibility
+ // of misuse.
+ eg, ok := err.(*multiError)
+ if !ok {
+ return []error{err}
+ }
+
+ errors := eg.Errors()
+ result := make([]error, len(errors))
+ copy(result, errors)
+ return result
+}
+
+// multiError is an error that holds one or more errors.
+//
+// An instance of this is guaranteed to be non-empty and flattened. That is,
+// none of the errors inside multiError are other multiErrors.
+//
+// multiError formats to a semi-colon delimited list of error messages with
+// %v and with a more readable multi-line format with %+v.
+type multiError struct {
+ copyNeeded atomic.Bool
+ errors []error
+}
+
+var _ errorGroup = (*multiError)(nil)
+
+// Errors returns the list of underlying errors.
+//
+// This slice MUST NOT be modified.
+func (merr *multiError) Errors() []error {
+ if merr == nil {
+ return nil
+ }
+ return merr.errors
+}
+
+func (merr *multiError) Error() string {
+ if merr == nil {
+ return ""
+ }
+
+ buff := _bufferPool.Get().(*bytes.Buffer)
+ buff.Reset()
+
+ merr.writeSingleline(buff)
+
+ result := buff.String()
+ _bufferPool.Put(buff)
+ return result
+}
+
+func (merr *multiError) Format(f fmt.State, c rune) {
+ if c == 'v' && f.Flag('+') {
+ merr.writeMultiline(f)
+ } else {
+ merr.writeSingleline(f)
+ }
+}
+
+func (merr *multiError) writeSingleline(w io.Writer) {
+ first := true
+ for _, item := range merr.errors {
+ if first {
+ first = false
+ } else {
+ w.Write(_singlelineSeparator)
+ }
+ io.WriteString(w, item.Error())
+ }
+}
+
+func (merr *multiError) writeMultiline(w io.Writer) {
+ w.Write(_multilinePrefix)
+ for _, item := range merr.errors {
+ w.Write(_multilineSeparator)
+ writePrefixLine(w, _multilineIndent, fmt.Sprintf("%+v", item))
+ }
+}
+
+// Writes s to the writer with the given prefix added before each line after
+// the first.
+func writePrefixLine(w io.Writer, prefix []byte, s string) {
+ first := true
+ for len(s) > 0 {
+ if first {
+ first = false
+ } else {
+ w.Write(prefix)
+ }
+
+ idx := strings.IndexByte(s, '\n')
+ if idx < 0 {
+ idx = len(s) - 1
+ }
+
+ io.WriteString(w, s[:idx+1])
+ s = s[idx+1:]
+ }
+}
+
+type inspectResult struct {
+ // Number of top-level non-nil errors
+ Count int
+
+ // Total number of errors including multiErrors
+ Capacity int
+
+ // Index of the first non-nil error in the list. Value is meaningless if
+ // Count is zero.
+ FirstErrorIdx int
+
+ // Whether the list contains at least one multiError
+ ContainsMultiError bool
+}
+
+// Inspects the given slice of errors so that we can efficiently allocate
+// space for it.
+func inspect(errors []error) (res inspectResult) {
+ first := true
+ for i, err := range errors {
+ if err == nil {
+ continue
+ }
+
+ res.Count++
+ if first {
+ first = false
+ res.FirstErrorIdx = i
+ }
+
+ if merr, ok := err.(*multiError); ok {
+ res.Capacity += len(merr.errors)
+ res.ContainsMultiError = true
+ } else {
+ res.Capacity++
+ }
+ }
+ return
+}
+
+// fromSlice converts the given list of errors into a single error.
+func fromSlice(errors []error) error {
+ res := inspect(errors)
+ switch res.Count {
+ case 0:
+ return nil
+ case 1:
+ // only one non-nil entry
+ return errors[res.FirstErrorIdx]
+ case len(errors):
+ if !res.ContainsMultiError {
+ // already flat
+ return &multiError{errors: errors}
+ }
+ }
+
+ nonNilErrs := make([]error, 0, res.Capacity)
+ for _, err := range errors[res.FirstErrorIdx:] {
+ if err == nil {
+ continue
+ }
+
+ if nested, ok := err.(*multiError); ok {
+ nonNilErrs = append(nonNilErrs, nested.errors...)
+ } else {
+ nonNilErrs = append(nonNilErrs, err)
+ }
+ }
+
+ return &multiError{errors: nonNilErrs}
+}
+
+// Combine combines the passed errors into a single error.
+//
+// If zero arguments were passed or if all items are nil, a nil error is
+// returned.
+//
+// Combine(nil, nil) // == nil
+//
+// If only a single error was passed, it is returned as-is.
+//
+// Combine(err) // == err
+//
+// Combine skips over nil arguments so this function may be used to combine
+// together errors from operations that fail independently of each other.
+//
+// multierr.Combine(
+// reader.Close(),
+// writer.Close(),
+// pipe.Close(),
+// )
+//
+// If any of the passed errors is a multierr error, it will be flattened along
+// with the other errors.
+//
+// multierr.Combine(multierr.Combine(err1, err2), err3)
+// // is the same as
+// multierr.Combine(err1, err2, err3)
+//
+// The returned error formats into a readable multi-line error message if
+// formatted with %+v.
+//
+// fmt.Sprintf("%+v", multierr.Combine(err1, err2))
+func Combine(errors ...error) error {
+ return fromSlice(errors)
+}
+
+// Append appends the given errors together. Either value may be nil.
+//
+// This function is a specialization of Combine for the common case where
+// there are only two errors.
+//
+// err = multierr.Append(reader.Close(), writer.Close())
+//
+// The following pattern may also be used to record failure of deferred
+// operations without losing information about the original error.
+//
+// func doSomething(..) (err error) {
+// f := acquireResource()
+// defer func() {
+// err = multierr.Append(err, f.Close())
+// }()
+// // ...
+// }
+func Append(left error, right error) error {
+ switch {
+ case left == nil:
+ return right
+ case right == nil:
+ return left
+ }
+
+ if _, ok := right.(*multiError); !ok {
+ if l, ok := left.(*multiError); ok && !l.copyNeeded.Swap(true) {
+ // Common case where the error on the left is constantly being
+ // appended to.
+ errs := append(l.errors, right)
+ return &multiError{errors: errs}
+ } else if !ok {
+ // Both errors are single errors.
+ return &multiError{errors: []error{left, right}}
+ }
+ }
+
+ // Either right or both, left and right, are multiErrors. Rely on usual
+ // expensive logic.
+ errors := [2]error{left, right}
+ return fromSlice(errors[0:])
+}
+
+// AppendInto appends an error into the destination of an error pointer and
+// returns whether the error being appended was non-nil.
+//
+// var err error
+// multierr.AppendInto(&err, r.Close())
+// multierr.AppendInto(&err, w.Close())
+//
+// The above is equivalent to,
+//
+// err := multierr.Append(r.Close(), w.Close())
+//
+// As AppendInto reports whether the provided error was non-nil, it may be
+// used to build a multierr error in a loop more ergonomically. For example:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if multierr.AppendInto(&err, parse(line, &item)) {
+// continue
+// }
+// items = append(items, item)
+// }
+//
+// Compare this with a version that relies solely on Append:
+//
+// var err error
+// for line := range lines {
+// var item Item
+// if parseErr := parse(line, &item); parseErr != nil {
+// err = multierr.Append(err, parseErr)
+// continue
+// }
+// items = append(items, item)
+// }
+func AppendInto(into *error, err error) (errored bool) {
+ if into == nil {
+ // We panic if 'into' is nil. This is not documented above
+ // because suggesting that the pointer must be non-nil may
+ // confuse users into thinking that the error that it points
+ // to must be non-nil.
+ panic("misuse of multierr.AppendInto: into pointer must not be nil")
+ }
+
+ if err == nil {
+ return false
+ }
+ *into = Append(*into, err)
+ return true
+}
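To make the Combine/Append semantics above concrete, a small sketch (the error values are illustrative) showing flattening and the `AppendInto` loop pattern:

```go
package main

import (
	"errors"
	"fmt"

	"go.uber.org/multierr"
)

func main() {
	errA := errors.New("A")
	errB := errors.New("B")
	errC := errors.New("C")

	// Nested Combine calls flatten: the result holds three errors,
	// not a multierr wrapping another multierr, and nils are skipped.
	err := multierr.Combine(multierr.Combine(errA, errB), nil, errC)
	fmt.Println(err)                       // A; B; C
	fmt.Println(len(multierr.Errors(err))) // 3
	fmt.Printf("%+v\n", err)               // readable multi-line form

	// AppendInto reports whether the appended error was non-nil,
	// which reads naturally inside loops.
	var agg error
	for _, e := range []error{nil, errA, nil, errB} {
		if multierr.AppendInto(&agg, e) {
			continue // this iteration produced an error
		}
	}
	fmt.Println(len(multierr.Errors(agg))) // 2
}
```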
diff --git a/vendor/go.uber.org/multierr/glide.yaml b/vendor/go.uber.org/multierr/glide.yaml
new file mode 100644
index 0000000..6ef084e
--- /dev/null
+++ b/vendor/go.uber.org/multierr/glide.yaml
@@ -0,0 +1,8 @@
+package: go.uber.org/multierr
+import:
+- package: go.uber.org/atomic
+ version: ^1
+testImport:
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
diff --git a/vendor/go.uber.org/multierr/go.mod b/vendor/go.uber.org/multierr/go.mod
new file mode 100644
index 0000000..ff8bdf9
--- /dev/null
+++ b/vendor/go.uber.org/multierr/go.mod
@@ -0,0 +1,8 @@
+module go.uber.org/multierr
+
+go 1.12
+
+require (
+ github.com/stretchr/testify v1.3.0
+ go.uber.org/atomic v1.7.0
+)
diff --git a/vendor/go.uber.org/multierr/go.sum b/vendor/go.uber.org/multierr/go.sum
new file mode 100644
index 0000000..ecfc286
--- /dev/null
+++ b/vendor/go.uber.org/multierr/go.sum
@@ -0,0 +1,11 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
diff --git a/vendor/go.uber.org/multierr/go113.go b/vendor/go.uber.org/multierr/go113.go
new file mode 100644
index 0000000..264b0ea
--- /dev/null
+++ b/vendor/go.uber.org/multierr/go113.go
@@ -0,0 +1,52 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// +build go1.13
+
+package multierr
+
+import "errors"
+
+// As attempts to find the first error in the error list that matches the type
+// of the value that target points to.
+//
+// This function allows errors.As to traverse the values stored on the
+// multierr error.
+func (merr *multiError) As(target interface{}) bool {
+ for _, err := range merr.Errors() {
+ if errors.As(err, target) {
+ return true
+ }
+ }
+ return false
+}
+
+// Is attempts to match the provided error against errors in the error list.
+//
+// This function allows errors.Is to traverse the values stored on the
+// multierr error.
+func (merr *multiError) Is(target error) bool {
+ for _, err := range merr.Errors() {
+ if errors.Is(err, target) {
+ return true
+ }
+ }
+ return false
+}
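Since the `Is`/`As` methods above let the standard `errors` helpers traverse a combined error, a sketch under the assumption of Go 1.16+ (for `io/fs`); the path is obviously hypothetical:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"

	"go.uber.org/multierr"
)

func main() {
	_, statErr := os.Stat("/definitely/missing")
	err := multierr.Append(errors.New("cleanup failed"), statErr)

	// errors.Is walks the combined error via multiError.Is and finds
	// the not-exist error inside the *fs.PathError.
	fmt.Println(errors.Is(err, fs.ErrNotExist)) // true

	// errors.As likewise extracts the first matching concrete error.
	var pathErr *fs.PathError
	if errors.As(err, &pathErr) {
		fmt.Println(pathErr.Path) // /definitely/missing
	}
}
```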
diff --git a/vendor/go.uber.org/zap/.codecov.yml b/vendor/go.uber.org/zap/.codecov.yml
new file mode 100644
index 0000000..8e5ca7d
--- /dev/null
+++ b/vendor/go.uber.org/zap/.codecov.yml
@@ -0,0 +1,17 @@
+coverage:
+ range: 80..100
+ round: down
+ precision: 2
+
+ status:
+ project: # measuring the overall project coverage
+ default: # context, you can create multiple ones with custom titles
+ enabled: yes # must be yes|true to enable this status
+ target: 95% # specify the target coverage for each commit status
+ # option: "auto" (must increase from parent commit or pull request base)
+ # option: "X%" a static target percentage to hit
+ if_not_found: success # if parent is not found report status as success, error, or failure
+ if_ci_failed: error # if ci fails report status as success, error, or failure
+ignore:
+ - internal/readme/readme.go
+
diff --git a/vendor/go.uber.org/zap/.gitignore b/vendor/go.uber.org/zap/.gitignore
new file mode 100644
index 0000000..da9d9d0
--- /dev/null
+++ b/vendor/go.uber.org/zap/.gitignore
@@ -0,0 +1,32 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+vendor
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
+*.pprof
+*.out
+*.log
+
+/bin
+cover.out
+cover.html
diff --git a/vendor/go.uber.org/zap/.readme.tmpl b/vendor/go.uber.org/zap/.readme.tmpl
new file mode 100644
index 0000000..3154a1e
--- /dev/null
+++ b/vendor/go.uber.org/zap/.readme.tmpl
@@ -0,0 +1,109 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup
+id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+{{.BenchmarkAddingFields}}
+
+Log a message with a logger that already has 10 fields of context:
+
+{{.BenchmarkAccumulatedContext}}
+
+Log a static string, without any context or `printf`-style templating:
+
+{{.BenchmarkWithoutFields}}
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in zap's [glide.lock][] file. [↩](#anchor-versions)
+
+[doc-img]: https://godoc.org/go.uber.org/zap?status.svg
+[doc]: https://godoc.org/go.uber.org/zap
+[ci-img]: https://travis-ci.com/uber-go/zap.svg?branch=master
+[ci]: https://travis-ci.com/uber-go/zap
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[glide.lock]: https://github.com/uber-go/zap/blob/master/glide.lock
+
diff --git a/vendor/go.uber.org/zap/CHANGELOG.md b/vendor/go.uber.org/zap/CHANGELOG.md
new file mode 100644
index 0000000..6c32101
--- /dev/null
+++ b/vendor/go.uber.org/zap/CHANGELOG.md
@@ -0,0 +1,492 @@
+# Changelog
+
+## 1.18.1 (28 Jun 2021)
+
+Bugfixes:
+* [#974][]: Fix nil dereference in logger constructed by `zap.NewNop`.
+
+[#974]: https://github.com/uber-go/zap/pull/974
+
+## 1.18.0 (28 Jun 2021)
+
+Enhancements:
+* [#961][]: Add `zapcore.BufferedWriteSyncer`, a new `WriteSyncer` that buffers
+ messages in-memory and flushes them periodically.
+* [#971][]: Add `zapio.Writer` to use a Zap logger as an `io.Writer`.
+* [#897][]: Add `zap.WithClock` option to control the source of time via the
+ new `zapcore.Clock` interface.
+* [#949][]: Avoid panicking in `zap.SugaredLogger` when arguments of `*w`
+ methods don't match expectations.
+* [#943][]: Add support for filtering by level or arbitrary matcher function to
+ `zaptest/observer`.
+* [#691][]: Comply with `io.StringWriter` and `io.ByteWriter` in Zap's
+ `buffer.Buffer`.
+
+Thanks to @atrn0, @ernado, @heyanfu, @hnlq715, @zchee
+for their contributions to this release.
+
+[#691]: https://github.com/uber-go/zap/pull/691
+[#897]: https://github.com/uber-go/zap/pull/897
+[#943]: https://github.com/uber-go/zap/pull/943
+[#949]: https://github.com/uber-go/zap/pull/949
+[#961]: https://github.com/uber-go/zap/pull/961
+[#971]: https://github.com/uber-go/zap/pull/971
+
+## 1.17.0 (25 May 2021)
+
+Bugfixes:
+* [#867][]: Encode `<nil>` for nil `error` instead of a panic.
+* [#931][], [#936][]: Update minimum version constraints to address
+ vulnerabilities in dependencies.
+
+Enhancements:
+* [#865][]: Improve alignment of fields of the Logger struct, reducing its
+ size from 96 to 80 bytes.
+* [#881][]: Support `grpclog.LoggerV2` in zapgrpc.
+* [#903][]: Support URL-encoded POST requests to the AtomicLevel HTTP handler
+ with the `application/x-www-form-urlencoded` content type.
+* [#912][]: Support multi-field encoding with `zap.Inline`.
+* [#913][]: Speed up SugaredLogger for calls with a single string.
+* [#928][]: Add support for filtering by field name to `zaptest/observer`.
+
+Thanks to @ash2k, @FMLS, @jimmystewpot, @Oncilla, @tsoslow, @tylitianrui, @withshubh, and @wziww for their contributions to this release.
+
+## 1.16.0 (1 Sep 2020)
+
+Bugfixes:
+* [#828][]: Fix missing newline in IncreaseLevel error messages.
+* [#835][]: Fix panic in JSON encoder when encoding times or durations
+ without specifying a time or duration encoder.
+* [#843][]: Honor CallerSkip when taking stack traces.
+* [#862][]: Fix the default file permissions to use `0666` and rely on the umask instead.
+* [#854][]: Encode `<nil>` for nil `Stringer` instead of a panic error log.
+
+Enhancements:
+* [#629][]: Added `zapcore.TimeEncoderOfLayout` to easily create time encoders
+ for custom layouts.
+* [#697][]: Added support for a configurable delimiter in the console encoder.
+* [#852][]: Optimize console encoder by pooling the underlying JSON encoder.
+* [#844][]: Add ability to include the calling function as part of logs.
+* [#843][]: Add `StackSkip` for including truncated stacks as a field.
+* [#861][]: Add options to customize Fatal behaviour for better testability.
+
+Thanks to @SteelPhase, @tmshn, @lixingwang, @wyxloading, @moul, @segevfiner, @andy-retailnext and @jcorbin for their contributions to this release.
+
+## 1.15.0 (23 Apr 2020)
+
+Bugfixes:
+* [#804][]: Fix handling of `Time` values out of `UnixNano` range.
+* [#812][]: Fix `IncreaseLevel` being reset after a call to `With`.
+
+Enhancements:
+* [#806][]: Add `WithCaller` option to supersede the `AddCaller` option. This
+ allows disabling annotation of log entries with caller information if
+ previously enabled with `AddCaller`.
+* [#813][]: Deprecate `NewSampler` constructor in favor of
+ `NewSamplerWithOptions` which supports a `SamplerHook` option. This option
+ adds support for monitoring sampling decisions through a hook.
+
+Thanks to @danielbprice for their contributions to this release.
+
+## 1.14.1 (14 Mar 2020)
+
+Bugfixes:
+* [#791][]: Fix panic on attempting to build a logger with an invalid Config.
+* [#795][]: Vendoring Zap with `go mod vendor` no longer includes Zap's
+ development-time dependencies.
+* [#799][]: Fix issue introduced in 1.14.0 that caused invalid JSON output to
+ be generated for arrays of `time.Time` objects when using string-based time
+ formats.
+
+Thanks to @YashishDua for their contributions to this release.
+
+## 1.14.0 (20 Feb 2020)
+
+Enhancements:
+* [#771][]: Optimize calls for disabled log levels.
+* [#773][]: Add millisecond duration encoder.
+* [#775][]: Add option to increase the level of a logger.
+* [#786][]: Optimize time formatters using `Time.AppendFormat` where possible.
+
+Thanks to @caibirdme for their contributions to this release.
+
+## 1.13.0 (13 Nov 2019)
+
+Enhancements:
+* [#758][]: Add `Intp`, `Stringp`, and other similar `*p` field constructors
+ to log pointers to primitives with support for `nil` values.
+
+Thanks to @jbizzle for their contributions to this release.
+
+## 1.12.0 (29 Oct 2019)
+
+Enhancements:
+* [#751][]: Migrate to Go modules.
+
+## 1.11.0 (21 Oct 2019)
+
+Enhancements:
+* [#725][]: Add `zapcore.OmitKey` to omit keys in an `EncoderConfig`.
+* [#736][]: Add `RFC3339` and `RFC3339Nano` time encoders.
+
+Thanks to @juicemia, @uhthomas for their contributions to this release.
+
+## 1.10.0 (29 Apr 2019)
+
+Bugfixes:
+* [#657][]: Fix `MapObjectEncoder.AppendByteString` not adding value as a
+ string.
+* [#706][]: Fix incorrect call depth to determine caller in Go 1.12.
+
+Enhancements:
+* [#610][]: Add `zaptest.WrapOptions` to wrap `zap.Option` for creating test
+ loggers.
+* [#675][]: Don't panic when encoding a String field.
+* [#704][]: Disable HTML escaping for JSON objects encoded using the
+ reflect-based encoder.
+
+Thanks to @iaroslav-ciupin, @lelenanam, @joa, @NWilson for their contributions
+to this release.
+
+## v1.9.1 (06 Aug 2018)
+
+Bugfixes:
+
+* [#614][]: MapObjectEncoder should not ignore empty slices.
+
+## v1.9.0 (19 Jul 2018)
+
+Enhancements:
+* [#602][]: Reduce number of allocations when logging with reflection.
+* [#572][], [#606][]: Expose a registry for third-party logging sinks.
+
+Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
+@dimroc for their contributions to this release.
+
+## v1.8.0 (13 Apr 2018)
+
+Enhancements:
+* [#508][]: Make log level configurable when redirecting the standard
+ library's logger.
+* [#518][]: Add a logger that writes to a `*testing.TB`.
+* [#577][]: Add a top-level alias for `zapcore.Field` to clean up GoDoc.
+
+Bugfixes:
+* [#574][]: Add a missing import comment to `go.uber.org/zap/buffer`.
+
+Thanks to @DiSiqueira and @djui for their contributions to this release.
+
+## v1.7.1 (25 Sep 2017)
+
+Bugfixes:
+* [#504][]: Store strings when using AddByteString with the map encoder.
+
+## v1.7.0 (21 Sep 2017)
+
+Enhancements:
+
+* [#487][]: Add `NewStdLogAt`, which extends `NewStdLog` by allowing the user
+ to specify the level of the logged messages.
+
+## v1.6.0 (30 Aug 2017)
+
+Enhancements:
+
+* [#491][]: Omit zap stack frames from stacktraces.
+* [#490][]: Add a `ContextMap` method to observer logs for simpler
+ field validation in tests.
+
+## v1.5.0 (22 Jul 2017)
+
+Enhancements:
+
+* [#460][] and [#470][]: Support errors produced by `go.uber.org/multierr`.
+* [#465][]: Support user-supplied encoders for logger names.
+
+Bugfixes:
+
+* [#477][]: Fix a bug that incorrectly truncated deep stacktraces.
+
+Thanks to @richard-tunein and @pavius for their contributions to this release.
+
+## v1.4.1 (08 Jun 2017)
+
+This release fixes two bugs.
+
+Bugfixes:
+
+* [#435][]: Support a variety of case conventions when unmarshaling levels.
+* [#444][]: Fix a panic in the observer.
+
+## v1.4.0 (12 May 2017)
+
+This release adds a few small features and is fully backward-compatible.
+
+Enhancements:
+
+* [#424][]: Add a `LineEnding` field to `EncoderConfig`, allowing users to
+ override the Unix-style default.
+* [#425][]: Preserve time zones when logging times.
+* [#431][]: Make `zap.AtomicLevel` implement `fmt.Stringer`, which makes a
+ variety of operations a bit simpler.
+
+## v1.3.0 (25 Apr 2017)
+
+This release adds an enhancement to zap's testing helpers as well as the
+ability to marshal an AtomicLevel. It is fully backward-compatible.
+
+Enhancements:
+
+* [#415][]: Add a substring-filtering helper to zap's observer. This is
+ particularly useful when testing the `SugaredLogger`.
+* [#416][]: Make `AtomicLevel` implement `encoding.TextMarshaler`.
+
+## v1.2.0 (13 Apr 2017)
+
+This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
+
+Enhancements:
+
+* [#402][]: Add a `zapgrpc` package that wraps zap's Logger and implements
+ `grpclog.Logger`.
+
+## v1.1.0 (31 Mar 2017)
+
+This release fixes two bugs and adds some enhancements to zap's testing helpers.
+It is fully backward-compatible.
+
+Bugfixes:
+
+* [#385][]: Fix caller path trimming on Windows.
+* [#396][]: Fix a panic when attempting to use non-existent directories with
+ zap's configuration struct.
+
+Enhancements:
+
+* [#386][]: Add filtering helpers to zaptest's observing logger.
+
+Thanks to @moitias for contributing to this release.
+
+## v1.0.0 (14 Mar 2017)
+
+This is zap's first stable release. All exported APIs are now final, and no
+further breaking changes will be made in the 1.x release series. Anyone using a
+semver-aware dependency manager should now pin to `^1`.
+
+Breaking changes:
+
+* [#366][]: Add byte-oriented APIs to encoders to log UTF-8 encoded text without
+ casting from `[]byte` to `string`.
+* [#364][]: To support buffering outputs, add `Sync` methods to `zapcore.Core`,
+ `zap.Logger`, and `zap.SugaredLogger`.
+* [#371][]: Rename the `testutils` package to `zaptest`, which is less likely to
+ clash with other testing helpers.
+
+Bugfixes:
+
+* [#362][]: Make the ISO8601 time formatters fixed-width, which is friendlier
+ for tab-separated console output.
+* [#369][]: Remove the automatic locks in `zapcore.NewCore`, which allows zap to
+ work with concurrency-safe `WriteSyncer` implementations.
+* [#347][]: Stop reporting errors when trying to `fsync` standard out on Linux
+ systems.
+* [#373][]: Report the correct caller from zap's standard library
+ interoperability wrappers.
+
+Enhancements:
+
+* [#348][]: Add a registry allowing third-party encodings to work with zap's
+ built-in `Config`.
+* [#327][]: Make the representation of logger callers configurable (like times,
+ levels, and durations).
+* [#376][]: Allow third-party encoders to use their own buffer pools, which
+ removes the last performance advantage that zap's encoders have over plugins.
+* [#346][]: Add `CombineWriteSyncers`, a convenience function to tee multiple
+ `WriteSyncer`s and lock the result.
+* [#365][]: Make zap's stacktraces compatible with mid-stack inlining (coming in
+ Go 1.9).
+* [#372][]: Export zap's observing logger as `zaptest/observer`. This makes it
+ easier for particularly punctilious users to unit test their application's
+ logging.
+
+Thanks to @suyash, @htrendev, @flisky, @Ulexus, and @skipor for their
+contributions to this release.
+
+## v1.0.0-rc.3 (7 Mar 2017)
+
+This is the third release candidate for zap's stable release. There are no
+breaking changes.
+
+Bugfixes:
+
+* [#339][]: Byte slices passed to `zap.Any` are now correctly treated as binary blobs
+ rather than `[]uint8`.
+
+Enhancements:
+
+* [#307][]: Users can opt into colored output for log levels.
+* [#353][]: In addition to hijacking the output of the standard library's
+ package-global logging functions, users can now construct a zap-backed
+ `log.Logger` instance.
+* [#311][]: Frames from common runtime functions and some of zap's internal
+ machinery are now omitted from stacktraces.
+
+Thanks to @ansel1 and @suyash for their contributions to this release.
+
+## v1.0.0-rc.2 (21 Feb 2017)
+
+This is the second release candidate for zap's stable release. It includes two
+breaking changes.
+
+Breaking changes:
+
+* [#316][]: Zap's global loggers are now fully concurrency-safe
+ (previously, users had to ensure that `ReplaceGlobals` was called before the
+ loggers were in use). However, they must now be accessed via the `L()` and
+ `S()` functions. Users can update their projects with
+
+ ```
+ gofmt -r "zap.L -> zap.L()" -w .
+ gofmt -r "zap.S -> zap.S()" -w .
+ ```
+* [#309][] and [#317][]: RC1 was mistakenly shipped with invalid
+ JSON and YAML struct tags on all config structs. This release fixes the tags
+ and adds static analysis to prevent similar bugs in the future.
+
+Bugfixes:
+
+* [#321][]: Redirecting the standard library's `log` output now
+ correctly reports the logger's caller.
+
+Enhancements:
+
+* [#325][] and [#333][]: Zap now transparently supports non-standard, rich
+ errors like those produced by `github.com/pkg/errors`.
+* [#326][]: Though `New(nil)` continues to return a no-op logger, `NewNop()` is
+ now preferred. Users can update their projects with `gofmt -r 'zap.New(nil) ->
+ zap.NewNop()' -w .`.
+* [#300][]: Incorrectly importing zap as `github.com/uber-go/zap` now returns a
+ more informative error.
+
+Thanks to @skipor and @chapsuk for their contributions to this release.
+
+## v1.0.0-rc.1 (14 Feb 2017)
+
+This is the first release candidate for zap's stable release. There are multiple
+breaking changes and improvements from the pre-release version. Most notably:
+
+* **Zap's import path is now "go.uber.org/zap"** — all users will
+ need to update their code.
+* User-facing types and functions remain in the `zap` package. Code relevant
+ largely to extension authors is now in the `zapcore` package.
+* The `zapcore.Core` type makes it easy for third-party packages to use zap's
+ internals but provide a different user-facing API.
+* `Logger` is now a concrete type instead of an interface.
+* A less verbose (though slower) logging API is included by default.
+* Package-global loggers `L` and `S` are included.
+* A human-friendly console encoder is included.
+* A declarative config struct allows common logger configurations to be managed
+ as configuration instead of code.
+* Sampling is more accurate, and doesn't depend on the standard library's shared
+ timer heap.
+
+## v0.1.0-beta.1 (6 Feb 2017)
+
+This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
+upgrade at their leisure. Since this is the first tagged release, there are no
+backward compatibility concerns and all functionality is new.
+
+Early zap adopters should pin to the 0.1.x minor version until they're ready to
+upgrade to the upcoming stable release.
+
+[#316]: https://github.com/uber-go/zap/pull/316
+[#309]: https://github.com/uber-go/zap/pull/309
+[#317]: https://github.com/uber-go/zap/pull/317
+[#321]: https://github.com/uber-go/zap/pull/321
+[#325]: https://github.com/uber-go/zap/pull/325
+[#333]: https://github.com/uber-go/zap/pull/333
+[#326]: https://github.com/uber-go/zap/pull/326
+[#300]: https://github.com/uber-go/zap/pull/300
+[#339]: https://github.com/uber-go/zap/pull/339
+[#307]: https://github.com/uber-go/zap/pull/307
+[#353]: https://github.com/uber-go/zap/pull/353
+[#311]: https://github.com/uber-go/zap/pull/311
+[#366]: https://github.com/uber-go/zap/pull/366
+[#364]: https://github.com/uber-go/zap/pull/364
+[#371]: https://github.com/uber-go/zap/pull/371
+[#362]: https://github.com/uber-go/zap/pull/362
+[#369]: https://github.com/uber-go/zap/pull/369
+[#347]: https://github.com/uber-go/zap/pull/347
+[#373]: https://github.com/uber-go/zap/pull/373
+[#348]: https://github.com/uber-go/zap/pull/348
+[#327]: https://github.com/uber-go/zap/pull/327
+[#376]: https://github.com/uber-go/zap/pull/376
+[#346]: https://github.com/uber-go/zap/pull/346
+[#365]: https://github.com/uber-go/zap/pull/365
+[#372]: https://github.com/uber-go/zap/pull/372
+[#385]: https://github.com/uber-go/zap/pull/385
+[#396]: https://github.com/uber-go/zap/pull/396
+[#386]: https://github.com/uber-go/zap/pull/386
+[#402]: https://github.com/uber-go/zap/pull/402
+[#415]: https://github.com/uber-go/zap/pull/415
+[#416]: https://github.com/uber-go/zap/pull/416
+[#424]: https://github.com/uber-go/zap/pull/424
+[#425]: https://github.com/uber-go/zap/pull/425
+[#431]: https://github.com/uber-go/zap/pull/431
+[#435]: https://github.com/uber-go/zap/pull/435
+[#444]: https://github.com/uber-go/zap/pull/444
+[#477]: https://github.com/uber-go/zap/pull/477
+[#465]: https://github.com/uber-go/zap/pull/465
+[#460]: https://github.com/uber-go/zap/pull/460
+[#470]: https://github.com/uber-go/zap/pull/470
+[#487]: https://github.com/uber-go/zap/pull/487
+[#490]: https://github.com/uber-go/zap/pull/490
+[#491]: https://github.com/uber-go/zap/pull/491
+[#504]: https://github.com/uber-go/zap/pull/504
+[#508]: https://github.com/uber-go/zap/pull/508
+[#518]: https://github.com/uber-go/zap/pull/518
+[#577]: https://github.com/uber-go/zap/pull/577
+[#574]: https://github.com/uber-go/zap/pull/574
+[#602]: https://github.com/uber-go/zap/pull/602
+[#572]: https://github.com/uber-go/zap/pull/572
+[#606]: https://github.com/uber-go/zap/pull/606
+[#614]: https://github.com/uber-go/zap/pull/614
+[#657]: https://github.com/uber-go/zap/pull/657
+[#706]: https://github.com/uber-go/zap/pull/706
+[#610]: https://github.com/uber-go/zap/pull/610
+[#675]: https://github.com/uber-go/zap/pull/675
+[#704]: https://github.com/uber-go/zap/pull/704
+[#725]: https://github.com/uber-go/zap/pull/725
+[#736]: https://github.com/uber-go/zap/pull/736
+[#751]: https://github.com/uber-go/zap/pull/751
+[#758]: https://github.com/uber-go/zap/pull/758
+[#771]: https://github.com/uber-go/zap/pull/771
+[#773]: https://github.com/uber-go/zap/pull/773
+[#775]: https://github.com/uber-go/zap/pull/775
+[#786]: https://github.com/uber-go/zap/pull/786
+[#791]: https://github.com/uber-go/zap/pull/791
+[#795]: https://github.com/uber-go/zap/pull/795
+[#799]: https://github.com/uber-go/zap/pull/799
+[#804]: https://github.com/uber-go/zap/pull/804
+[#812]: https://github.com/uber-go/zap/pull/812
+[#806]: https://github.com/uber-go/zap/pull/806
+[#813]: https://github.com/uber-go/zap/pull/813
+[#629]: https://github.com/uber-go/zap/pull/629
+[#697]: https://github.com/uber-go/zap/pull/697
+[#828]: https://github.com/uber-go/zap/pull/828
+[#835]: https://github.com/uber-go/zap/pull/835
+[#843]: https://github.com/uber-go/zap/pull/843
+[#844]: https://github.com/uber-go/zap/pull/844
+[#852]: https://github.com/uber-go/zap/pull/852
+[#854]: https://github.com/uber-go/zap/pull/854
+[#861]: https://github.com/uber-go/zap/pull/861
+[#862]: https://github.com/uber-go/zap/pull/862
+[#865]: https://github.com/uber-go/zap/pull/865
+[#867]: https://github.com/uber-go/zap/pull/867
+[#881]: https://github.com/uber-go/zap/pull/881
+[#903]: https://github.com/uber-go/zap/pull/903
+[#912]: https://github.com/uber-go/zap/pull/912
+[#913]: https://github.com/uber-go/zap/pull/913
+[#928]: https://github.com/uber-go/zap/pull/928
+[#931]: https://github.com/uber-go/zap/pull/931
+[#936]: https://github.com/uber-go/zap/pull/936
diff --git a/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000..e327d9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/CODE_OF_CONDUCT.md
@@ -0,0 +1,75 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as
+contributors and maintainers pledge to making participation in our project and
+our community a harassment-free experience for everyone, regardless of age,
+body size, disability, ethnicity, gender identity and expression, level of
+experience, nationality, personal appearance, race, religion, or sexual
+identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment
+include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or
+ advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic
+ address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable
+behavior and are expected to take appropriate and fair corrective action in
+response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or
+reject comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct, or to ban temporarily or
+permanently any contributor for other behaviors that they deem inappropriate,
+threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community. Examples of
+representing a project or community include using an official project e-mail
+address, posting via an official social media account, or acting as an
+appointed representative at an online or offline event. Representation of a
+project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported by contacting the project team at oss-conduct@uber.com. The project
+team will review and investigate all complaints, and will respond in a way
+that it deems appropriate to the circumstances. The project team is obligated
+to maintain confidentiality with regard to the reporter of an incident.
+Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good
+faith may face temporary or permanent repercussions as determined by other
+members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 1.4, available at
+[http://contributor-covenant.org/version/1/4][version].
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/zap/CONTRIBUTING.md b/vendor/go.uber.org/zap/CONTRIBUTING.md
new file mode 100644
index 0000000..5cd9656
--- /dev/null
+++ b/vendor/go.uber.org/zap/CONTRIBUTING.md
@@ -0,0 +1,75 @@
+# Contributing
+
+We'd love your help making zap the very best structured logging library in Go!
+
+If you'd like to add new exported APIs, please [open an issue][open-issue]
+describing your proposal — discussing API changes ahead of time makes
+pull request review much smoother. In your issue, pull request, and any other
+communications, please remember to treat your fellow contributors with
+respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
+
+Note that you'll need to sign [Uber's Contributor License Agreement][cla]
+before we can accept any of your contributions. If necessary, a bot will remind
+you to accept the CLA when you open your pull request.
+
+## Setup
+
+[Fork][fork], then clone the repository:
+
+```
+mkdir -p $GOPATH/src/go.uber.org
+cd $GOPATH/src/go.uber.org
+git clone git@github.com:your_github_username/zap.git
+cd zap
+git remote add upstream https://github.com/uber-go/zap.git
+git fetch upstream
+```
+
+Make sure that the tests and the linters pass:
+
+```
+make test
+make lint
+```
+
+If you're not using the minor version of Go specified in the Makefile's
+`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
+fine, but it means that you'll only discover lint failures after you open your
+pull request.
+
+## Making Changes
+
+Start by creating a new branch for your changes:
+
+```
+cd $GOPATH/src/go.uber.org/zap
+git checkout master
+git fetch upstream
+git rebase upstream/master
+git checkout -b cool_new_feature
+```
+
+Make your changes, then ensure that `make lint` and `make test` still pass. If
+you're satisfied with your changes, push them to your fork.
+
+```
+git push origin cool_new_feature
+```
+
+Then use the GitHub UI to open a pull request.
+
+At this point, you're waiting on us to review your changes. We *try* to respond
+to issues and pull requests within a few business days, and we may suggest some
+improvements or alternatives. Once your changes are approved, one of the
+project maintainers will merge them.
+
+We're much more likely to approve your changes if you:
+
+* Add tests for new functionality.
+* Write a [good commit message][commit-message].
+* Maintain backward compatibility.
+
+[fork]: https://github.com/uber-go/zap/fork
+[open-issue]: https://github.com/uber-go/zap/issues/new
+[cla]: https://cla-assistant.io/uber-go/zap
+[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/zap/FAQ.md b/vendor/go.uber.org/zap/FAQ.md
new file mode 100644
index 0000000..b183b20
--- /dev/null
+++ b/vendor/go.uber.org/zap/FAQ.md
@@ -0,0 +1,164 @@
+# Frequently Asked Questions
+
+## Design
+
+### Why spend so much effort on logger performance?
+
+Of course, most applications won't notice the impact of a slow logger: they
+already take tens or hundreds of milliseconds for each operation, so an extra
+millisecond doesn't matter.
+
+On the other hand, why *not* make structured logging fast? The `SugaredLogger`
+isn't any harder to use than other logging packages, and the `Logger` makes
+structured logging possible in performance-sensitive contexts. Across a fleet
+of Go microservices, making each application even slightly more efficient adds
+up quickly.
+
+### Why aren't `Logger` and `SugaredLogger` interfaces?
+
+Unlike the familiar `io.Writer` and `http.Handler`, `Logger` and
+`SugaredLogger` interfaces would include *many* methods. As [Rob Pike points
+out][go-proverbs], "The bigger the interface, the weaker the abstraction."
+Interfaces are also rigid — *any* change requires releasing a new major
+version, since it breaks all third-party implementations.
+
+Making the `Logger` and `SugaredLogger` concrete types doesn't sacrifice much
+abstraction, and it lets us add methods without introducing breaking changes.
+Your applications should define and depend upon an interface that includes
+just the methods you use.
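+
+For example, a consumer-defined interface might cover only the error path
+(the `errLogger` name here is illustrative, not part of zap):
+
+```go
+// errLogger names only what this package actually calls; *zap.Logger
+// satisfies it.
+type errLogger interface {
+	Error(msg string, fields ...zap.Field)
+}
+```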
+
+### Why are some of my logs missing?
+
+Zap drops logs intentionally when sampling is enabled. The production
+configuration (as returned by `NewProductionConfig()`) enables sampling, which
+causes repeated logs within the same second to be sampled. For details on why
+sampling is enabled, see [Why sample application logs?](https://github.com/uber-go/zap/blob/master/FAQ.md#why-sample-application-logs).
+
+### Why sample application logs?
+
+Applications often experience runs of errors, either because of a bug or
+because of a misbehaving user. Logging errors is usually a good idea, but it
+can easily make this bad situation worse: not only is your application coping
+with a flood of errors, it's also spending extra CPU cycles and I/O logging
+those errors. Since writes are typically serialized, logging limits throughput
+when you need it most.
+
+Sampling fixes this problem by dropping repetitive log entries. Under normal
+conditions, your application writes out every entry. When similar entries are
+logged hundreds or thousands of times each second, though, zap begins dropping
+duplicates to preserve throughput.
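+
+If sampling is a poor fit for your workload, you can turn it off on the
+production preset; a nil `SamplingConfig` disables sampling entirely. A
+minimal sketch:
+
+```go
+cfg := zap.NewProductionConfig()
+cfg.Sampling = nil // a nil SamplingConfig disables sampling
+logger, err := cfg.Build()
+```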
+
+### Why do the structured logging APIs take a message in addition to fields?
+
+Subjectively, we find it helpful to accompany structured context with a brief
+description. This isn't critical during development, but it makes debugging
+and operating unfamiliar systems much easier.
+
+More concretely, zap's sampling algorithm uses the message to identify
+duplicate entries. In our experience, this is a practical middle ground
+between random sampling (which often drops the exact entry that you need while
+debugging) and hashing the complete entry (which is prohibitively expensive).
+
+### Why include package-global loggers?
+
+Since so many other logging packages include a global logger, many
+applications aren't designed to accept loggers as explicit parameters.
+Changing function signatures is often a breaking change, so zap includes
+global loggers to simplify migration.
+
+Avoid them where possible.
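+
+If you do use the globals, a sketch of wiring them up with `ReplaceGlobals`
+(which returns a function that restores the previous global logger):
+
+```go
+logger := zap.NewExample()
+undo := zap.ReplaceGlobals(logger)
+defer undo()
+
+zap.L().Info("now routed through our logger")
+```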
+
+### Why include dedicated Panic and Fatal log levels?
+
+In general, application code should handle errors gracefully instead of using
+`panic` or `os.Exit`. However, every rule has exceptions, and it's common to
+crash when an error is truly unrecoverable. To avoid losing any information
+— especially the reason for the crash — the logger must flush any
+buffered entries before the process exits.
+
+Zap makes this easy by offering `Panic` and `Fatal` logging methods that
+automatically flush before exiting. Of course, this doesn't guarantee that
+logs will never be lost, but it eliminates a common error.
+
+See the discussion in uber-go/zap#207 for more details.
+
+### What's `DPanic`?
+
+`DPanic` stands for "panic in development." In development, it logs at
+`PanicLevel`; otherwise, it logs at `ErrorLevel`. `DPanic` makes it easier to
+catch errors that are theoretically possible, but shouldn't actually happen,
+*without* crashing in production.
+
+If you've ever written code like this, you need `DPanic`:
+
+```go
+if err != nil {
+ panic(fmt.Sprintf("shouldn't ever get here: %v", err))
+}
+```
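+
+With `DPanic`, the same check can go through the logger instead (a sketch):
+
+```go
+if err != nil {
+	logger.DPanic("shouldn't ever get here", zap.Error(err))
+}
+```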
+
+## Installation
+
+### What does the error `expects import "go.uber.org/zap"` mean?
+
+Either zap was installed incorrectly or you're referencing the wrong package
+name in your code.
+
+Zap's source code happens to be hosted on GitHub, but the [import
+path][import-path] is `go.uber.org/zap`. This gives us, the project
+maintainers, the freedom to move the source code if necessary. However, it
+means that you need to take a little care when installing and using the
+package.
+
+If you follow two simple rules, everything should work: install zap with `go
+get -u go.uber.org/zap`, and always import it in your code with `import
+"go.uber.org/zap"`. Your code shouldn't contain *any* references to
+`github.com/uber-go/zap`.
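+
+Concretely, every file should import it as:
+
+```go
+import "go.uber.org/zap"
+```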
+
+## Usage
+
+### Does zap support log rotation?
+
+Zap doesn't natively support rotating log files, since we prefer to leave this
+to an external program like `logrotate`.
+
+However, it's easy to integrate a log rotation package like
+[`gopkg.in/natefinch/lumberjack.v2`][lumberjack] as a `zapcore.WriteSyncer`.
+
+```go
+// lumberjack.Logger is already safe for concurrent use, so we don't need to
+// lock it.
+w := zapcore.AddSync(&lumberjack.Logger{
+ Filename: "/var/log/myapp/foo.log",
+ MaxSize: 500, // megabytes
+ MaxBackups: 3,
+ MaxAge: 28, // days
+})
+core := zapcore.NewCore(
+ zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
+ w,
+ zap.InfoLevel,
+)
+logger := zap.New(core)
+```
+
+## Extensions
+
+We'd love to support every logging need within zap itself, but we're only
+familiar with a handful of log ingestion systems, flag-parsing packages, and
+the like. Rather than merging code that we can't effectively debug and
+support, we'd rather grow an ecosystem of zap extensions.
+
+We're aware of the following extensions, but haven't used them ourselves:
+
+| Package | Integration |
+| --- | --- |
+| `github.com/tchap/zapext` | Sentry, syslog |
+| `github.com/fgrosse/zaptest` | Ginkgo |
+| `github.com/blendle/zapdriver` | Stackdriver |
+| `github.com/moul/zapgorm` | Gorm |
+| `github.com/moul/zapfilter` | Advanced filtering rules |
+
+[go-proverbs]: https://go-proverbs.github.io/
+[import-path]: https://golang.org/cmd/go/#hdr-Remote_import_paths
+[lumberjack]: https://godoc.org/gopkg.in/natefinch/lumberjack.v2
diff --git a/vendor/go.uber.org/zap/LICENSE.txt b/vendor/go.uber.org/zap/LICENSE.txt
new file mode 100644
index 0000000..6652bed
--- /dev/null
+++ b/vendor/go.uber.org/zap/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (c) 2016-2017 Uber Technologies, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/go.uber.org/zap/Makefile b/vendor/go.uber.org/zap/Makefile
new file mode 100644
index 0000000..9b1bc3b
--- /dev/null
+++ b/vendor/go.uber.org/zap/Makefile
@@ -0,0 +1,73 @@
+export GOBIN ?= $(shell pwd)/bin
+
+GOLINT = $(GOBIN)/golint
+STATICCHECK = $(GOBIN)/staticcheck
+BENCH_FLAGS ?= -cpuprofile=cpu.pprof -memprofile=mem.pprof -benchmem
+
+# Directories containing independent Go modules.
+#
+# We track coverage only for the main module.
+MODULE_DIRS = . ./benchmarks ./zapgrpc/internal/test
+
+# Many Go tools take file globs or directories as arguments instead of packages.
+GO_FILES := $(shell \
+ find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
+ -o -name '*.go' -print | cut -b3-)
+
+.PHONY: all
+all: lint test
+
+.PHONY: lint
+lint: $(GOLINT) $(STATICCHECK)
+ @rm -rf lint.log
+ @echo "Checking formatting..."
+ @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
+ @echo "Checking vet..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go vet ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking lint..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(GOLINT) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking staticcheck..."
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && $(STATICCHECK) ./... 2>&1) &&) true | tee -a lint.log
+ @echo "Checking for unresolved FIXMEs..."
+ @git grep -i fixme | grep -v -e Makefile | tee -a lint.log
+ @echo "Checking for license headers..."
+ @./checklicense.sh | tee -a lint.log
+ @[ ! -s lint.log ]
+ @echo "Checking 'go mod tidy'..."
+ @make tidy
+ @if ! git diff --quiet; then \
+ echo "'go mod tidy' resulted in changes or working tree is dirty:"; \
+ git --no-pager diff; \
+ fi
+
+$(GOLINT):
+ cd tools && go install golang.org/x/lint/golint
+
+$(STATICCHECK):
+ cd tools && go install honnef.co/go/tools/cmd/staticcheck
+
+.PHONY: test
+test:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go test -race ./...) &&) true
+
+.PHONY: cover
+cover:
+ go test -race -coverprofile=cover.out -coverpkg=./... ./...
+ go tool cover -html=cover.out -o cover.html
+
+.PHONY: bench
+BENCH ?= .
+bench:
+ @$(foreach dir,$(MODULE_DIRS), ( \
+ cd $(dir) && \
+ go list ./... | xargs -n1 go test -bench=$(BENCH) -run="^$$" $(BENCH_FLAGS) \
+ ) &&) true
+
+.PHONY: updatereadme
+updatereadme:
+ rm -f README.md
+ cat .readme.tmpl | go run internal/readme/readme.go > README.md
+
+.PHONY: tidy
+tidy:
+ @$(foreach dir,$(MODULE_DIRS),(cd $(dir) && go mod tidy) &&) true
diff --git a/vendor/go.uber.org/zap/README.md b/vendor/go.uber.org/zap/README.md
new file mode 100644
index 0000000..1e64d6c
--- /dev/null
+++ b/vendor/go.uber.org/zap/README.md
@@ -0,0 +1,134 @@
+# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+Blazing fast, structured, leveled logging in Go.
+
+## Installation
+
+`go get -u go.uber.org/zap`
+
+Note that zap only supports the two most recent minor versions of Go.
+
+## Quick Start
+
+In contexts where performance is nice, but not critical, use the
+`SugaredLogger`. It's 4-10x faster than other structured logging
+packages and includes both structured and `printf`-style APIs.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync() // flushes buffer, if any
+sugar := logger.Sugar()
+sugar.Infow("failed to fetch URL",
+ // Structured context as loosely typed key-value pairs.
+ "url", url,
+ "attempt", 3,
+ "backoff", time.Second,
+)
+sugar.Infof("Failed to fetch URL: %s", url)
+```
+
+When performance and type safety are critical, use the `Logger`. It's even
+faster than the `SugaredLogger` and allocates far less, but it only supports
+structured logging.
+
+```go
+logger, _ := zap.NewProduction()
+defer logger.Sync()
+logger.Info("failed to fetch URL",
+ // Structured context as strongly typed Field values.
+ zap.String("url", url),
+ zap.Int("attempt", 3),
+ zap.Duration("backoff", time.Second),
+)
+```
+
+See the [documentation][doc] and [FAQ](FAQ.md) for more details.
+
+## Performance
+
+For applications that log in the hot path, reflection-based serialization and
+string formatting are prohibitively expensive — they're CPU-intensive
+and make many small allocations. Put differently, using `encoding/json` and
+`fmt.Fprintf` to log tons of `interface{}`s makes your application slow.
+
+Zap takes a different approach. It includes a reflection-free, zero-allocation
+JSON encoder, and the base `Logger` strives to avoid serialization overhead
+and allocations wherever possible. By building the high-level `SugaredLogger`
+on that foundation, zap lets users *choose* when they need to count every
+allocation and when they'd prefer a more familiar, loosely typed API.
+
+As measured by its own [benchmarking suite][], not only is zap more performant
+than comparable structured logging packages — it's also faster than the
+standard library. Like all benchmarks, take these with a grain of salt.<sup
+id="anchor-versions">[1](#footnote-versions)</sup>
+
+Log a message and 10 fields:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 862 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 1250 ns/op | +45% | 11 allocs/op
+| zerolog | 4021 ns/op | +366% | 76 allocs/op
+| go-kit | 4542 ns/op | +427% | 105 allocs/op
+| apex/log | 26785 ns/op | +3007% | 115 allocs/op
+| logrus | 29501 ns/op | +3322% | 125 allocs/op
+| log15 | 29906 ns/op | +3369% | 122 allocs/op
+
+Log a message with a logger that already has 10 fields of context:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 126 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 187 ns/op | +48% | 2 allocs/op
+| zerolog | 88 ns/op | -30% | 0 allocs/op
+| go-kit | 5087 ns/op | +3937% | 103 allocs/op
+| log15 | 18548 ns/op | +14621% | 73 allocs/op
+| apex/log | 26012 ns/op | +20544% | 104 allocs/op
+| logrus | 27236 ns/op | +21516% | 113 allocs/op
+
+Log a static string, without any context or `printf`-style templating:
+
+| Package | Time | Time % to zap | Objects Allocated |
+| :------ | :--: | :-----------: | :---------------: |
+| :zap: zap | 118 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 191 ns/op | +62% | 2 allocs/op
+| zerolog | 93 ns/op | -21% | 0 allocs/op
+| go-kit | 280 ns/op | +137% | 11 allocs/op
+| standard library | 499 ns/op | +323% | 2 allocs/op
+| apex/log | 1990 ns/op | +1586% | 10 allocs/op
+| logrus | 3129 ns/op | +2552% | 24 allocs/op
+| log15 | 3887 ns/op | +3194% | 23 allocs/op
+
+## Development Status: Stable
+
+All APIs are finalized, and no breaking changes will be made in the 1.x series
+of releases. Users of semver-aware dependency management systems should pin
+zap to `^1`.
+
+## Contributing
+
+We encourage and support an active, healthy community of contributors —
+including you! Details are in the [contribution guide](CONTRIBUTING.md) and
+the [code of conduct](CODE_OF_CONDUCT.md). The zap maintainers keep an eye on
+issues and pull requests, but you can also report any negative conduct to
+oss-conduct@uber.com. That email list is a private, safe space; even the zap
+maintainers don't have access, so don't hesitate to hold us to a high
+standard.
+
+<hr>
+
+Released under the [MIT License](LICENSE.txt).
+
+<sup id="footnote-versions">1</sup> In particular, keep in mind that we may be
+benchmarking against slightly older versions of other packages. Versions are
+pinned in the [benchmarks/go.mod][] file. [↩](#anchor-versions)
+
+[doc-img]: https://pkg.go.dev/badge/go.uber.org/zap
+[doc]: https://pkg.go.dev/go.uber.org/zap
+[ci-img]: https://github.com/uber-go/zap/actions/workflows/go.yml/badge.svg
+[ci]: https://github.com/uber-go/zap/actions/workflows/go.yml
+[cov-img]: https://codecov.io/gh/uber-go/zap/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/uber-go/zap
+[benchmarking suite]: https://github.com/uber-go/zap/tree/master/benchmarks
+[benchmarks/go.mod]: https://github.com/uber-go/zap/blob/master/benchmarks/go.mod
+
diff --git a/vendor/go.uber.org/zap/array.go b/vendor/go.uber.org/zap/array.go
new file mode 100644
index 0000000..5be3704
--- /dev/null
+++ b/vendor/go.uber.org/zap/array.go
@@ -0,0 +1,320 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Array constructs a field with the given key and ArrayMarshaler. It provides
+// a flexible, but still type-safe and efficient, way to add array-like types
+// to the logging context. The struct's MarshalLogArray method is called lazily.
+func Array(key string, val zapcore.ArrayMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ArrayMarshalerType, Interface: val}
+}
+
+// Bools constructs a field that carries a slice of bools.
+func Bools(key string, bs []bool) Field {
+ return Array(key, bools(bs))
+}
+
+// ByteStrings constructs a field that carries a slice of []byte, each of which
+// must be UTF-8 encoded text.
+func ByteStrings(key string, bss [][]byte) Field {
+ return Array(key, byteStringsArray(bss))
+}
+
+// Complex128s constructs a field that carries a slice of complex numbers.
+func Complex128s(key string, nums []complex128) Field {
+ return Array(key, complex128s(nums))
+}
+
+// Complex64s constructs a field that carries a slice of complex numbers.
+func Complex64s(key string, nums []complex64) Field {
+ return Array(key, complex64s(nums))
+}
+
+// Durations constructs a field that carries a slice of time.Durations.
+func Durations(key string, ds []time.Duration) Field {
+ return Array(key, durations(ds))
+}
+
+// Float64s constructs a field that carries a slice of floats.
+func Float64s(key string, nums []float64) Field {
+ return Array(key, float64s(nums))
+}
+
+// Float32s constructs a field that carries a slice of floats.
+func Float32s(key string, nums []float32) Field {
+ return Array(key, float32s(nums))
+}
+
+// Ints constructs a field that carries a slice of integers.
+func Ints(key string, nums []int) Field {
+ return Array(key, ints(nums))
+}
+
+// Int64s constructs a field that carries a slice of integers.
+func Int64s(key string, nums []int64) Field {
+ return Array(key, int64s(nums))
+}
+
+// Int32s constructs a field that carries a slice of integers.
+func Int32s(key string, nums []int32) Field {
+ return Array(key, int32s(nums))
+}
+
+// Int16s constructs a field that carries a slice of integers.
+func Int16s(key string, nums []int16) Field {
+ return Array(key, int16s(nums))
+}
+
+// Int8s constructs a field that carries a slice of integers.
+func Int8s(key string, nums []int8) Field {
+ return Array(key, int8s(nums))
+}
+
+// Strings constructs a field that carries a slice of strings.
+func Strings(key string, ss []string) Field {
+ return Array(key, stringArray(ss))
+}
+
+// Times constructs a field that carries a slice of time.Times.
+func Times(key string, ts []time.Time) Field {
+ return Array(key, times(ts))
+}
+
+// Uints constructs a field that carries a slice of unsigned integers.
+func Uints(key string, nums []uint) Field {
+ return Array(key, uints(nums))
+}
+
+// Uint64s constructs a field that carries a slice of unsigned integers.
+func Uint64s(key string, nums []uint64) Field {
+ return Array(key, uint64s(nums))
+}
+
+// Uint32s constructs a field that carries a slice of unsigned integers.
+func Uint32s(key string, nums []uint32) Field {
+ return Array(key, uint32s(nums))
+}
+
+// Uint16s constructs a field that carries a slice of unsigned integers.
+func Uint16s(key string, nums []uint16) Field {
+ return Array(key, uint16s(nums))
+}
+
+// Uint8s constructs a field that carries a slice of unsigned integers.
+func Uint8s(key string, nums []uint8) Field {
+ return Array(key, uint8s(nums))
+}
+
+// Uintptrs constructs a field that carries a slice of pointer addresses.
+func Uintptrs(key string, us []uintptr) Field {
+ return Array(key, uintptrs(us))
+}
+
+// Errors constructs a field that carries a slice of errors.
+func Errors(key string, errs []error) Field {
+ return Array(key, errArray(errs))
+}
+
+type bools []bool
+
+func (bs bools) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bs {
+ arr.AppendBool(bs[i])
+ }
+ return nil
+}
+
+type byteStringsArray [][]byte
+
+func (bss byteStringsArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range bss {
+ arr.AppendByteString(bss[i])
+ }
+ return nil
+}
+
+type complex128s []complex128
+
+func (nums complex128s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex128(nums[i])
+ }
+ return nil
+}
+
+type complex64s []complex64
+
+func (nums complex64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendComplex64(nums[i])
+ }
+ return nil
+}
+
+type durations []time.Duration
+
+func (ds durations) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ds {
+ arr.AppendDuration(ds[i])
+ }
+ return nil
+}
+
+type float64s []float64
+
+func (nums float64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat64(nums[i])
+ }
+ return nil
+}
+
+type float32s []float32
+
+func (nums float32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendFloat32(nums[i])
+ }
+ return nil
+}
+
+type ints []int
+
+func (nums ints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt(nums[i])
+ }
+ return nil
+}
+
+type int64s []int64
+
+func (nums int64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt64(nums[i])
+ }
+ return nil
+}
+
+type int32s []int32
+
+func (nums int32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt32(nums[i])
+ }
+ return nil
+}
+
+type int16s []int16
+
+func (nums int16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt16(nums[i])
+ }
+ return nil
+}
+
+type int8s []int8
+
+func (nums int8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendInt8(nums[i])
+ }
+ return nil
+}
+
+type stringArray []string
+
+func (ss stringArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ss {
+ arr.AppendString(ss[i])
+ }
+ return nil
+}
+
+type times []time.Time
+
+func (ts times) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range ts {
+ arr.AppendTime(ts[i])
+ }
+ return nil
+}
+
+type uints []uint
+
+func (nums uints) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint(nums[i])
+ }
+ return nil
+}
+
+type uint64s []uint64
+
+func (nums uint64s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint64(nums[i])
+ }
+ return nil
+}
+
+type uint32s []uint32
+
+func (nums uint32s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint32(nums[i])
+ }
+ return nil
+}
+
+type uint16s []uint16
+
+func (nums uint16s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint16(nums[i])
+ }
+ return nil
+}
+
+type uint8s []uint8
+
+func (nums uint8s) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUint8(nums[i])
+ }
+ return nil
+}
+
+type uintptrs []uintptr
+
+func (nums uintptrs) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range nums {
+ arr.AppendUintptr(nums[i])
+ }
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/buffer/buffer.go b/vendor/go.uber.org/zap/buffer/buffer.go
new file mode 100644
index 0000000..9e929cd
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/buffer.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package buffer provides a thin wrapper around a byte slice. Unlike the
+// standard library's bytes.Buffer, it supports a portion of the strconv
+// package's zero-allocation formatters.
+package buffer // import "go.uber.org/zap/buffer"
+
+import (
+ "strconv"
+ "time"
+)
+
+const _size = 1024 // by default, create 1 KiB buffers
+
+// Buffer is a thin wrapper around a byte slice. It's intended to be pooled, so
+// the only way to construct one is via a Pool.
+type Buffer struct {
+ bs []byte
+ pool Pool
+}
+
+// AppendByte writes a single byte to the Buffer.
+func (b *Buffer) AppendByte(v byte) {
+ b.bs = append(b.bs, v)
+}
+
+// AppendString writes a string to the Buffer.
+func (b *Buffer) AppendString(s string) {
+ b.bs = append(b.bs, s...)
+}
+
+// AppendInt appends an integer to the underlying buffer (assuming base 10).
+func (b *Buffer) AppendInt(i int64) {
+ b.bs = strconv.AppendInt(b.bs, i, 10)
+}
+
+// AppendTime appends the time formatted using the specified layout.
+func (b *Buffer) AppendTime(t time.Time, layout string) {
+ b.bs = t.AppendFormat(b.bs, layout)
+}
+
+// AppendUint appends an unsigned integer to the underlying buffer (assuming
+// base 10).
+func (b *Buffer) AppendUint(i uint64) {
+ b.bs = strconv.AppendUint(b.bs, i, 10)
+}
+
+// AppendBool appends a bool to the underlying buffer.
+func (b *Buffer) AppendBool(v bool) {
+ b.bs = strconv.AppendBool(b.bs, v)
+}
+
+// AppendFloat appends a float to the underlying buffer. It doesn't quote NaN
+// or +/- Inf.
+func (b *Buffer) AppendFloat(f float64, bitSize int) {
+ b.bs = strconv.AppendFloat(b.bs, f, 'f', -1, bitSize)
+}
+
+// Len returns the length of the underlying byte slice.
+func (b *Buffer) Len() int {
+ return len(b.bs)
+}
+
+// Cap returns the capacity of the underlying byte slice.
+func (b *Buffer) Cap() int {
+ return cap(b.bs)
+}
+
+// Bytes returns a mutable reference to the underlying byte slice.
+func (b *Buffer) Bytes() []byte {
+ return b.bs
+}
+
+// String returns a string copy of the underlying byte slice.
+func (b *Buffer) String() string {
+ return string(b.bs)
+}
+
+// Reset resets the underlying byte slice. Subsequent writes re-use the slice's
+// backing array.
+func (b *Buffer) Reset() {
+ b.bs = b.bs[:0]
+}
+
+// Write implements io.Writer.
+func (b *Buffer) Write(bs []byte) (int, error) {
+ b.bs = append(b.bs, bs...)
+ return len(bs), nil
+}
+
+// WriteByte writes a single byte to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteByte(v byte) error {
+ b.AppendByte(v)
+ return nil
+}
+
+// WriteString writes a string to the Buffer.
+//
+// The returned error is always nil; the signature is kept compatible
+// with bytes.Buffer and bufio.Writer.
+func (b *Buffer) WriteString(s string) (int, error) {
+ b.AppendString(s)
+ return len(s), nil
+}
+
+// TrimNewline trims any final "\n" byte from the end of the buffer.
+func (b *Buffer) TrimNewline() {
+ if i := len(b.bs) - 1; i >= 0 {
+ if b.bs[i] == '\n' {
+ b.bs = b.bs[:i]
+ }
+ }
+}
+
+// Free returns the Buffer to its Pool.
+//
+// Callers must not retain references to the Buffer after calling Free.
+func (b *Buffer) Free() {
+ b.pool.put(b)
+}
diff --git a/vendor/go.uber.org/zap/buffer/pool.go b/vendor/go.uber.org/zap/buffer/pool.go
new file mode 100644
index 0000000..8fb3e20
--- /dev/null
+++ b/vendor/go.uber.org/zap/buffer/pool.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package buffer
+
+import "sync"
+
+// A Pool is a type-safe wrapper around a sync.Pool.
+type Pool struct {
+ p *sync.Pool
+}
+
+// NewPool constructs a new Pool.
+func NewPool() Pool {
+ return Pool{p: &sync.Pool{
+ New: func() interface{} {
+ return &Buffer{bs: make([]byte, 0, _size)}
+ },
+ }}
+}
+
+// Get retrieves a Buffer from the pool, creating one if necessary.
+func (p Pool) Get() *Buffer {
+ buf := p.p.Get().(*Buffer)
+ buf.Reset()
+ buf.pool = p
+ return buf
+}
+
+func (p Pool) put(buf *Buffer) {
+ p.p.Put(buf)
+}
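+
+// A typical usage sketch (names are illustrative): pools are usually
+// package-level singletons, and callers return buffers with Free when done.
+//
+//	var _pool = NewPool()
+//
+//	func render() string {
+//		buf := _pool.Get()
+//		defer buf.Free()
+//		buf.AppendString("n=")
+//		buf.AppendInt(42)
+//		return buf.String()
+//	}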
diff --git a/vendor/go.uber.org/zap/checklicense.sh b/vendor/go.uber.org/zap/checklicense.sh
new file mode 100644
index 0000000..345ac8b
--- /dev/null
+++ b/vendor/go.uber.org/zap/checklicense.sh
@@ -0,0 +1,17 @@
+#!/bin/bash -e
+
+ERROR_COUNT=0
+while read -r file
+do
+ case "$(head -1 "${file}")" in
+ *"Copyright (c) "*" Uber Technologies, Inc.")
+ # everything's cool
+ ;;
+ *)
+ echo "$file is missing license header."
+ (( ERROR_COUNT++ ))
+ ;;
+ esac
+done < <(git ls-files "*\.go")
+
+exit $ERROR_COUNT
diff --git a/vendor/go.uber.org/zap/config.go b/vendor/go.uber.org/zap/config.go
new file mode 100644
index 0000000..55637fb
--- /dev/null
+++ b/vendor/go.uber.org/zap/config.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "sort"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// SamplingConfig sets a sampling strategy for the logger. Sampling caps the
+// global CPU and I/O load that logging puts on your process while attempting
+// to preserve a representative subset of your logs.
+//
+// If specified, the Sampler will invoke the Hook after each decision.
+//
+// Values configured here are per-second. See zapcore.NewSamplerWithOptions for
+// details.
+type SamplingConfig struct {
+ Initial int `json:"initial" yaml:"initial"`
+ Thereafter int `json:"thereafter" yaml:"thereafter"`
+ Hook func(zapcore.Entry, zapcore.SamplingDecision) `json:"-" yaml:"-"`
+}
+
+// Config offers a declarative way to construct a logger. It doesn't do
+// anything that can't be done with New, Options, and the various
+// zapcore.WriteSyncer and zapcore.Core wrappers, but it's a simpler way to
+// toggle common options.
+//
+// Note that Config intentionally supports only the most common options. More
+// unusual logging setups (logging to network connections or message queues,
+// splitting output between multiple files, etc.) are possible, but require
+// direct use of the zapcore package. For sample code, see the package-level
+// BasicConfiguration and AdvancedConfiguration examples.
+//
+// For an example showing runtime log level changes, see the documentation for
+// AtomicLevel.
+type Config struct {
+ // Level is the minimum enabled logging level. Note that this is a dynamic
+ // level, so calling Config.Level.SetLevel will atomically change the log
+ // level of all loggers descended from this config.
+ Level AtomicLevel `json:"level" yaml:"level"`
+ // Development puts the logger in development mode, which changes the
+ // behavior of DPanicLevel and takes stacktraces more liberally.
+ Development bool `json:"development" yaml:"development"`
+ // DisableCaller stops annotating logs with the calling function's file
+ // name and line number. By default, all logs are annotated.
+ DisableCaller bool `json:"disableCaller" yaml:"disableCaller"`
+ // DisableStacktrace completely disables automatic stacktrace capturing. By
+ // default, stacktraces are captured for WarnLevel and above logs in
+ // development and ErrorLevel and above in production.
+ DisableStacktrace bool `json:"disableStacktrace" yaml:"disableStacktrace"`
+ // Sampling sets a sampling policy. A nil SamplingConfig disables sampling.
+ Sampling *SamplingConfig `json:"sampling" yaml:"sampling"`
+ // Encoding sets the logger's encoding. Valid values are "json" and
+ // "console", as well as any third-party encodings registered via
+ // RegisterEncoder.
+ Encoding string `json:"encoding" yaml:"encoding"`
+ // EncoderConfig sets options for the chosen encoder. See
+ // zapcore.EncoderConfig for details.
+ EncoderConfig zapcore.EncoderConfig `json:"encoderConfig" yaml:"encoderConfig"`
+ // OutputPaths is a list of URLs or file paths to write logging output to.
+ // See Open for details.
+ OutputPaths []string `json:"outputPaths" yaml:"outputPaths"`
+ // ErrorOutputPaths is a list of URLs to write internal logger errors to.
+ // The default is standard error.
+ //
+ // Note that this setting only affects internal errors; for sample code that
+ // sends error-level logs to a different location from info- and debug-level
+ // logs, see the package-level AdvancedConfiguration example.
+ ErrorOutputPaths []string `json:"errorOutputPaths" yaml:"errorOutputPaths"`
+ // InitialFields is a collection of fields to add to the root logger.
+ InitialFields map[string]interface{} `json:"initialFields" yaml:"initialFields"`
+}
+
+// NewProductionEncoderConfig returns an opinionated EncoderConfig for
+// production environments.
+func NewProductionEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ TimeKey: "ts",
+ LevelKey: "level",
+ NameKey: "logger",
+ CallerKey: "caller",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "msg",
+ StacktraceKey: "stacktrace",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.EpochTimeEncoder,
+ EncodeDuration: zapcore.SecondsDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewProductionConfig is a reasonable production logging configuration.
+// Logging is enabled at InfoLevel and above.
+//
+// It uses a JSON encoder, writes to standard error, and enables sampling.
+// Stacktraces are automatically included on logs of ErrorLevel and above.
+func NewProductionConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(InfoLevel),
+ Development: false,
+ Sampling: &SamplingConfig{
+ Initial: 100,
+ Thereafter: 100,
+ },
+ Encoding: "json",
+ EncoderConfig: NewProductionEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// NewDevelopmentEncoderConfig returns an opinionated EncoderConfig for
+// development environments.
+func NewDevelopmentEncoderConfig() zapcore.EncoderConfig {
+ return zapcore.EncoderConfig{
+ // Keys can be anything except the empty string.
+ TimeKey: "T",
+ LevelKey: "L",
+ NameKey: "N",
+ CallerKey: "C",
+ FunctionKey: zapcore.OmitKey,
+ MessageKey: "M",
+ StacktraceKey: "S",
+ LineEnding: zapcore.DefaultLineEnding,
+ EncodeLevel: zapcore.CapitalLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ EncodeCaller: zapcore.ShortCallerEncoder,
+ }
+}
+
+// NewDevelopmentConfig is a reasonable development logging configuration.
+// Logging is enabled at DebugLevel and above.
+//
+// It enables development mode (which makes DPanicLevel logs panic), uses a
+// console encoder, writes to standard error, and disables sampling.
+// Stacktraces are automatically included on logs of WarnLevel and above.
+func NewDevelopmentConfig() Config {
+ return Config{
+ Level: NewAtomicLevelAt(DebugLevel),
+ Development: true,
+ Encoding: "console",
+ EncoderConfig: NewDevelopmentEncoderConfig(),
+ OutputPaths: []string{"stderr"},
+ ErrorOutputPaths: []string{"stderr"},
+ }
+}
+
+// Build constructs a logger from the Config and Options.
+func (cfg Config) Build(opts ...Option) (*Logger, error) {
+ enc, err := cfg.buildEncoder()
+ if err != nil {
+ return nil, err
+ }
+
+ sink, errSink, err := cfg.openSinks()
+ if err != nil {
+ return nil, err
+ }
+
+ if cfg.Level == (AtomicLevel{}) {
+ return nil, fmt.Errorf("missing Level")
+ }
+
+ log := New(
+ zapcore.NewCore(enc, sink, cfg.Level),
+ cfg.buildOptions(errSink)...,
+ )
+ if len(opts) > 0 {
+ log = log.WithOptions(opts...)
+ }
+ return log, nil
+}
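+
+// A minimal sketch of building a logger from a Config (assumes only the
+// exported API above):
+//
+//	cfg := zap.NewProductionConfig()
+//	cfg.OutputPaths = []string{"stdout"}
+//	logger, err := cfg.Build()
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer logger.Sync()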
+
+func (cfg Config) buildOptions(errSink zapcore.WriteSyncer) []Option {
+ opts := []Option{ErrorOutput(errSink)}
+
+ if cfg.Development {
+ opts = append(opts, Development())
+ }
+
+ if !cfg.DisableCaller {
+ opts = append(opts, AddCaller())
+ }
+
+ stackLevel := ErrorLevel
+ if cfg.Development {
+ stackLevel = WarnLevel
+ }
+ if !cfg.DisableStacktrace {
+ opts = append(opts, AddStacktrace(stackLevel))
+ }
+
+ if scfg := cfg.Sampling; scfg != nil {
+ opts = append(opts, WrapCore(func(core zapcore.Core) zapcore.Core {
+ var samplerOpts []zapcore.SamplerOption
+ if scfg.Hook != nil {
+ samplerOpts = append(samplerOpts, zapcore.SamplerHook(scfg.Hook))
+ }
+ return zapcore.NewSamplerWithOptions(
+ core,
+ time.Second,
+ cfg.Sampling.Initial,
+ cfg.Sampling.Thereafter,
+ samplerOpts...,
+ )
+ }))
+ }
+
+ if len(cfg.InitialFields) > 0 {
+ fs := make([]Field, 0, len(cfg.InitialFields))
+ keys := make([]string, 0, len(cfg.InitialFields))
+ for k := range cfg.InitialFields {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ for _, k := range keys {
+ fs = append(fs, Any(k, cfg.InitialFields[k]))
+ }
+ opts = append(opts, Fields(fs...))
+ }
+
+ return opts
+}
+
+func (cfg Config) openSinks() (zapcore.WriteSyncer, zapcore.WriteSyncer, error) {
+ sink, closeOut, err := Open(cfg.OutputPaths...)
+ if err != nil {
+ return nil, nil, err
+ }
+ errSink, _, err := Open(cfg.ErrorOutputPaths...)
+ if err != nil {
+ closeOut()
+ return nil, nil, err
+ }
+ return sink, errSink, nil
+}
+
+func (cfg Config) buildEncoder() (zapcore.Encoder, error) {
+ return newEncoder(cfg.Encoding, cfg.EncoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/doc.go b/vendor/go.uber.org/zap/doc.go
new file mode 100644
index 0000000..8638dd1
--- /dev/null
+++ b/vendor/go.uber.org/zap/doc.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zap provides fast, structured, leveled logging.
+//
+// For applications that log in the hot path, reflection-based serialization
+// and string formatting are prohibitively expensive - they're CPU-intensive
+// and make many small allocations. Put differently, using json.Marshal and
+// fmt.Fprintf to log tons of interface{} makes your application slow.
+//
+// Zap takes a different approach. It includes a reflection-free,
+// zero-allocation JSON encoder, and the base Logger strives to avoid
+// serialization overhead and allocations wherever possible. By building the
+// high-level SugaredLogger on that foundation, zap lets users choose when
+// they need to count every allocation and when they'd prefer a more familiar,
+// loosely typed API.
+//
+// Choosing a Logger
+//
+// In contexts where performance is nice, but not critical, use the
+// SugaredLogger. It's 4-10x faster than other structured logging packages and
+// supports both structured and printf-style logging. Like log15 and go-kit,
+// the SugaredLogger's structured logging APIs are loosely typed and accept a
+// variadic number of key-value pairs. (For more advanced use cases, they also
+// accept strongly typed fields - see the SugaredLogger.With documentation for
+// details.)
+// sugar := zap.NewExample().Sugar()
+// defer sugar.Sync()
+// sugar.Infow("failed to fetch URL",
+// "url", "http://example.com",
+// "attempt", 3,
+// "backoff", time.Second,
+// )
+// sugar.Infof("failed to fetch URL: %s", "http://example.com")
+//
+// By default, loggers are unbuffered. However, since zap's low-level APIs
+// allow buffering, calling Sync before letting your process exit is a good
+// habit.
+//
+// In the rare contexts where every microsecond and every allocation matter,
+// use the Logger. It's even faster than the SugaredLogger and allocates far
+// less, but it only supports strongly-typed, structured logging.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// logger.Info("failed to fetch URL",
+// zap.String("url", "http://example.com"),
+// zap.Int("attempt", 3),
+// zap.Duration("backoff", time.Second),
+// )
+//
+// Choosing between the Logger and SugaredLogger doesn't need to be an
+// application-wide decision: converting between the two is simple and
+// inexpensive.
+// logger := zap.NewExample()
+// defer logger.Sync()
+// sugar := logger.Sugar()
+// plain := sugar.Desugar()
+//
+// Configuring Zap
+//
+// The simplest way to build a Logger is to use zap's opinionated presets:
+// NewExample, NewProduction, and NewDevelopment. These presets build a logger
+// with a single function call:
+// logger, err := zap.NewProduction()
+// if err != nil {
+// log.Fatalf("can't initialize zap logger: %v", err)
+// }
+// defer logger.Sync()
+//
+// Presets are fine for small projects, but larger projects and organizations
+// naturally require a bit more customization. For most users, zap's Config
+// struct strikes the right balance between flexibility and convenience. See
+// the package-level BasicConfiguration example for sample code.
+//
+// More unusual configurations (splitting output between files, sending logs
+// to a message queue, etc.) are possible, but require direct use of
+// go.uber.org/zap/zapcore. See the package-level AdvancedConfiguration
+// example for sample code.
+//
+// Extending Zap
+//
+// The zap package itself is a relatively thin wrapper around the interfaces
+// in go.uber.org/zap/zapcore. Extending zap to support a new encoding (e.g.,
+// BSON), a new log sink (e.g., Kafka), or something more exotic (perhaps an
+// exception aggregation service, like Sentry or Rollbar) typically requires
+// implementing the zapcore.Encoder, zapcore.WriteSyncer, or zapcore.Core
+// interfaces. See the zapcore documentation for details.
+//
+// Similarly, package authors can use the high-performance Encoder and Core
+// implementations in the zapcore package to build their own loggers.
+//
+// Frequently Asked Questions
+//
+// An FAQ covering everything from installation errors to design decisions is
+// available at https://github.com/uber-go/zap/blob/master/FAQ.md.
+package zap // import "go.uber.org/zap"
diff --git a/vendor/go.uber.org/zap/encoder.go b/vendor/go.uber.org/zap/encoder.go
new file mode 100644
index 0000000..08ed833
--- /dev/null
+++ b/vendor/go.uber.org/zap/encoder.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var (
+ errNoEncoderNameSpecified = errors.New("no encoder name specified")
+
+ _encoderNameToConstructor = map[string]func(zapcore.EncoderConfig) (zapcore.Encoder, error){
+ "console": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewConsoleEncoder(encoderConfig), nil
+ },
+ "json": func(encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ return zapcore.NewJSONEncoder(encoderConfig), nil
+ },
+ }
+ _encoderMutex sync.RWMutex
+)
+
+// RegisterEncoder registers an encoder constructor, which the Config struct
+// can then reference. By default, the "json" and "console" encoders are
+// registered.
+//
+// Attempting to register an encoder whose name is already taken returns an
+// error.
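+//
+// For example, a sketch of registering a custom encoder (newPlainEncoder is
+// hypothetical, not part of zap):
+// err := zap.RegisterEncoder("plain", func(cfg zapcore.EncoderConfig) (zapcore.Encoder, error) {
+// return newPlainEncoder(cfg), nil
+// })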
+func RegisterEncoder(name string, constructor func(zapcore.EncoderConfig) (zapcore.Encoder, error)) error {
+ _encoderMutex.Lock()
+ defer _encoderMutex.Unlock()
+ if name == "" {
+ return errNoEncoderNameSpecified
+ }
+ if _, ok := _encoderNameToConstructor[name]; ok {
+ return fmt.Errorf("encoder already registered for name %q", name)
+ }
+ _encoderNameToConstructor[name] = constructor
+ return nil
+}
+
+func newEncoder(name string, encoderConfig zapcore.EncoderConfig) (zapcore.Encoder, error) {
+ if encoderConfig.TimeKey != "" && encoderConfig.EncodeTime == nil {
+ return nil, fmt.Errorf("missing EncodeTime in EncoderConfig")
+ }
+
+ _encoderMutex.RLock()
+ defer _encoderMutex.RUnlock()
+ if name == "" {
+ return nil, errNoEncoderNameSpecified
+ }
+ constructor, ok := _encoderNameToConstructor[name]
+ if !ok {
+ return nil, fmt.Errorf("no encoder registered for name %q", name)
+ }
+ return constructor(encoderConfig)
+}
diff --git a/vendor/go.uber.org/zap/error.go b/vendor/go.uber.org/zap/error.go
new file mode 100644
index 0000000..65982a5
--- /dev/null
+++ b/vendor/go.uber.org/zap/error.go
@@ -0,0 +1,80 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Error is shorthand for the common idiom NamedError("error", err).
+func Error(err error) Field {
+ return NamedError("error", err)
+}
+
+// NamedError constructs a field that lazily stores err.Error() under the
+// provided key. Errors which also implement fmt.Formatter (like those produced
+// by github.com/pkg/errors) will also have their verbose representation stored
+// under key+"Verbose". If passed a nil error, the field is a no-op.
+//
+// For the common case in which the key is simply "error", the Error function
+// is shorter and less repetitive.
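+//
+// For example (a sketch, assuming err came from a failed open):
+// logger.Info("open failed", zap.NamedError("cause", err))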
+func NamedError(key string, err error) Field {
+ if err == nil {
+ return Skip()
+ }
+ return Field{Key: key, Type: zapcore.ErrorType, Interface: err}
+}
+
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr zapcore.ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+ // To represent each error as an object with an "error" attribute and
+ // potentially an "errorVerbose" attribute, we need to wrap it in a
+ // type that implements LogObjectMarshaler. To prevent this from
+ // allocating, pool the wrapper type.
+ elem := _errArrayElemPool.Get().(*errArrayElem)
+ elem.error = errs[i]
+ arr.AppendObject(elem)
+ elem.error = nil
+ _errArrayElemPool.Put(elem)
+ }
+ return nil
+}
+
+type errArrayElem struct {
+ error
+}
+
+func (e *errArrayElem) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ // Re-use the error field's logic, which supports non-standard error types.
+ Error(e.error).AddTo(enc)
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/field.go b/vendor/go.uber.org/zap/field.go
new file mode 100644
index 0000000..bbb745d
--- /dev/null
+++ b/vendor/go.uber.org/zap/field.go
@@ -0,0 +1,549 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "math"
+ "time"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// Field is an alias for zapcore.Field. Aliasing this type dramatically
+// improves the navigability of this package's API documentation.
+type Field = zapcore.Field
+
+var (
+ _minTimeInt64 = time.Unix(0, math.MinInt64)
+ _maxTimeInt64 = time.Unix(0, math.MaxInt64)
+)
+
+// Skip constructs a no-op field, which is often useful when handling invalid
+// inputs in other Field constructors.
+func Skip() Field {
+ return Field{Type: zapcore.SkipType}
+}
+
+// nilField returns a field which will marshal explicitly as nil. See motivation
+// in https://github.com/uber-go/zap/issues/753 . If we ever make breaking
+// changes and add zapcore.NilType and zapcore.ObjectEncoder.AddNil, the
+// implementation here should be changed to reflect that.
+func nilField(key string) Field { return Reflect(key, nil) }
+
+// Binary constructs a field that carries an opaque binary blob.
+//
+// Binary data is serialized in an encoding-appropriate format. For example,
+// zap's JSON encoder base64-encodes binary blobs. To log UTF-8 encoded text,
+// use ByteString.
+func Binary(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.BinaryType, Interface: val}
+}
+
+// Bool constructs a field that carries a bool.
+func Bool(key string, val bool) Field {
+ var ival int64
+ if val {
+ ival = 1
+ }
+ return Field{Key: key, Type: zapcore.BoolType, Integer: ival}
+}
+
+// Boolp constructs a field that carries a *bool. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Boolp(key string, val *bool) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Bool(key, *val)
+}
+
+// ByteString constructs a field that carries UTF-8 encoded text as a []byte.
+// To log opaque binary blobs (which aren't necessarily valid UTF-8), use
+// Binary.
+func ByteString(key string, val []byte) Field {
+ return Field{Key: key, Type: zapcore.ByteStringType, Interface: val}
+}
+
+// Complex128 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex128 to
+// interface{}).
+func Complex128(key string, val complex128) Field {
+ return Field{Key: key, Type: zapcore.Complex128Type, Interface: val}
+}
+
+// Complex128p constructs a field that carries a *complex128. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex128p(key string, val *complex128) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex128(key, *val)
+}
+
+// Complex64 constructs a field that carries a complex number. Unlike most
+// numeric fields, this costs an allocation (to convert the complex64 to
+// interface{}).
+func Complex64(key string, val complex64) Field {
+ return Field{Key: key, Type: zapcore.Complex64Type, Interface: val}
+}
+
+// Complex64p constructs a field that carries a *complex64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Complex64p(key string, val *complex64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Complex64(key, *val)
+}
+
+// Float64 constructs a field that carries a float64. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float64(key string, val float64) Field {
+ return Field{Key: key, Type: zapcore.Float64Type, Integer: int64(math.Float64bits(val))}
+}
+
+// Float64p constructs a field that carries a *float64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float64p(key string, val *float64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float64(key, *val)
+}
+
+// Float32 constructs a field that carries a float32. The way the
+// floating-point value is represented is encoder-dependent, so marshaling is
+// necessarily lazy.
+func Float32(key string, val float32) Field {
+ return Field{Key: key, Type: zapcore.Float32Type, Integer: int64(math.Float32bits(val))}
+}
+
+// Float32p constructs a field that carries a *float32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Float32p(key string, val *float32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Float32(key, *val)
+}
+
+// Int constructs a field with the given key and value.
+func Int(key string, val int) Field {
+ return Int64(key, int64(val))
+}
+
+// Intp constructs a field that carries a *int. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Intp(key string, val *int) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int(key, *val)
+}
+
+// Int64 constructs a field with the given key and value.
+func Int64(key string, val int64) Field {
+ return Field{Key: key, Type: zapcore.Int64Type, Integer: val}
+}
+
+// Int64p constructs a field that carries a *int64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int64p(key string, val *int64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int64(key, *val)
+}
+
+// Int32 constructs a field with the given key and value.
+func Int32(key string, val int32) Field {
+ return Field{Key: key, Type: zapcore.Int32Type, Integer: int64(val)}
+}
+
+// Int32p constructs a field that carries a *int32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int32p(key string, val *int32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int32(key, *val)
+}
+
+// Int16 constructs a field with the given key and value.
+func Int16(key string, val int16) Field {
+ return Field{Key: key, Type: zapcore.Int16Type, Integer: int64(val)}
+}
+
+// Int16p constructs a field that carries a *int16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int16p(key string, val *int16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int16(key, *val)
+}
+
+// Int8 constructs a field with the given key and value.
+func Int8(key string, val int8) Field {
+ return Field{Key: key, Type: zapcore.Int8Type, Integer: int64(val)}
+}
+
+// Int8p constructs a field that carries a *int8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Int8p(key string, val *int8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Int8(key, *val)
+}
+
+// String constructs a field with the given key and value.
+func String(key string, val string) Field {
+ return Field{Key: key, Type: zapcore.StringType, String: val}
+}
+
+// Stringp constructs a field that carries a *string. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Stringp(key string, val *string) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return String(key, *val)
+}
+
+// Uint constructs a field with the given key and value.
+func Uint(key string, val uint) Field {
+ return Uint64(key, uint64(val))
+}
+
+// Uintp constructs a field that carries a *uint. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintp(key string, val *uint) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint(key, *val)
+}
+
+// Uint64 constructs a field with the given key and value.
+func Uint64(key string, val uint64) Field {
+ return Field{Key: key, Type: zapcore.Uint64Type, Integer: int64(val)}
+}
+
+// Uint64p constructs a field that carries a *uint64. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint64p(key string, val *uint64) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint64(key, *val)
+}
+
+// Uint32 constructs a field with the given key and value.
+func Uint32(key string, val uint32) Field {
+ return Field{Key: key, Type: zapcore.Uint32Type, Integer: int64(val)}
+}
+
+// Uint32p constructs a field that carries a *uint32. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint32p(key string, val *uint32) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint32(key, *val)
+}
+
+// Uint16 constructs a field with the given key and value.
+func Uint16(key string, val uint16) Field {
+ return Field{Key: key, Type: zapcore.Uint16Type, Integer: int64(val)}
+}
+
+// Uint16p constructs a field that carries a *uint16. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint16p(key string, val *uint16) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint16(key, *val)
+}
+
+// Uint8 constructs a field with the given key and value.
+func Uint8(key string, val uint8) Field {
+ return Field{Key: key, Type: zapcore.Uint8Type, Integer: int64(val)}
+}
+
+// Uint8p constructs a field that carries a *uint8. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uint8p(key string, val *uint8) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uint8(key, *val)
+}
+
+// Uintptr constructs a field with the given key and value.
+func Uintptr(key string, val uintptr) Field {
+ return Field{Key: key, Type: zapcore.UintptrType, Integer: int64(val)}
+}
+
+// Uintptrp constructs a field that carries a *uintptr. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Uintptrp(key string, val *uintptr) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Uintptr(key, *val)
+}
+
+// Reflect constructs a field with the given key and an arbitrary object. It uses
+// an encoding-appropriate, reflection-based function to lazily serialize nearly
+// any object into the logging context, but it's relatively slow and
+// allocation-heavy. Outside tests, Any is always a better choice.
+//
+// If encoding fails (e.g., trying to serialize a map[int]string to JSON), Reflect
+// includes the error message in the final log output.
+func Reflect(key string, val interface{}) Field {
+ return Field{Key: key, Type: zapcore.ReflectType, Interface: val}
+}
+
+// Namespace creates a named, isolated scope within the logger's context. All
+// subsequent fields will be added to the new namespace.
+//
+// This helps prevent key collisions when injecting loggers into sub-components
+// or third-party libraries.
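+//
+// For example, a sketch that keeps a sub-component's keys isolated:
+// logger.With(zap.Namespace("db"), zap.String("host", "localhost"))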
+func Namespace(key string) Field {
+ return Field{Key: key, Type: zapcore.NamespaceType}
+}
+
+// Stringer constructs a field with the given key and the output of the value's
+// String method. The Stringer's String method is called lazily.
+func Stringer(key string, val fmt.Stringer) Field {
+ return Field{Key: key, Type: zapcore.StringerType, Interface: val}
+}
+
+// Time constructs a Field with the given key and value. The encoder
+// controls how the time is serialized.
+func Time(key string, val time.Time) Field {
+ if val.Before(_minTimeInt64) || val.After(_maxTimeInt64) {
+ return Field{Key: key, Type: zapcore.TimeFullType, Interface: val}
+ }
+ return Field{Key: key, Type: zapcore.TimeType, Integer: val.UnixNano(), Interface: val.Location()}
+}
+
+// Timep constructs a field that carries a *time.Time. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Timep(key string, val *time.Time) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Time(key, *val)
+}
+
+// Stack constructs a field that stores a stacktrace of the current goroutine
+// under the provided key. Keep in mind that taking a stacktrace is eager and
+// expensive (relatively speaking); this function both makes an allocation and
+// takes about two microseconds.
+func Stack(key string) Field {
+ return StackSkip(key, 1) // skip Stack
+}
+
+// StackSkip constructs a field similarly to Stack, but also skips the given
+// number of frames from the top of the stacktrace.
+func StackSkip(key string, skip int) Field {
+ // Returning the stacktrace as a string costs an allocation, but saves us
+ // from expanding the zapcore.Field union struct to include a byte slice. Since
+ // taking a stacktrace is already so expensive (~10us), the extra allocation
+ // is okay.
+ return String(key, takeStacktrace(skip+1)) // skip StackSkip
+}
+
+// Duration constructs a field with the given key and value. The encoder
+// controls how the duration is serialized.
+func Duration(key string, val time.Duration) Field {
+ return Field{Key: key, Type: zapcore.DurationType, Integer: int64(val)}
+}
+
+// Durationp constructs a field that carries a *time.Duration. The returned Field will safely
+// and explicitly represent `nil` when appropriate.
+func Durationp(key string, val *time.Duration) Field {
+ if val == nil {
+ return nilField(key)
+ }
+ return Duration(key, *val)
+}
+
+// Object constructs a field with the given key and ObjectMarshaler. It
+// provides a flexible, but still type-safe and efficient, way to add map- or
+// struct-like user-defined types to the logging context. The struct's
+// MarshalLogObject method is called lazily.
+func Object(key string, val zapcore.ObjectMarshaler) Field {
+ return Field{Key: key, Type: zapcore.ObjectMarshalerType, Interface: val}
+}
+
+// Inline constructs a Field that is similar to Object, but it
+// will add the elements of the provided ObjectMarshaler to the
+// current namespace.
+func Inline(val zapcore.ObjectMarshaler) Field {
+ return zapcore.Field{
+ Type: zapcore.InlineMarshalerType,
+ Interface: val,
+ }
+}
+
+// Any takes a key and an arbitrary value and chooses the best way to represent
+// them as a field, falling back to a reflection-based approach only if
+// necessary.
+//
+// Since byte/uint8 and rune/int32 are aliases, Any can't differentiate between
+// them. To minimize surprises, []byte values are treated as binary blobs, byte
+// values are treated as uint8, and runes are always treated as integers.
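+//
+// For example (a sketch):
+// logger.Info("request handled",
+// zap.Any("latency", 15*time.Millisecond), // dispatches to Duration
+// zap.Any("attempt", 3), // dispatches to Int
+// )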
+func Any(key string, value interface{}) Field {
+ switch val := value.(type) {
+ case zapcore.ObjectMarshaler:
+ return Object(key, val)
+ case zapcore.ArrayMarshaler:
+ return Array(key, val)
+ case bool:
+ return Bool(key, val)
+ case *bool:
+ return Boolp(key, val)
+ case []bool:
+ return Bools(key, val)
+ case complex128:
+ return Complex128(key, val)
+ case *complex128:
+ return Complex128p(key, val)
+ case []complex128:
+ return Complex128s(key, val)
+ case complex64:
+ return Complex64(key, val)
+ case *complex64:
+ return Complex64p(key, val)
+ case []complex64:
+ return Complex64s(key, val)
+ case float64:
+ return Float64(key, val)
+ case *float64:
+ return Float64p(key, val)
+ case []float64:
+ return Float64s(key, val)
+ case float32:
+ return Float32(key, val)
+ case *float32:
+ return Float32p(key, val)
+ case []float32:
+ return Float32s(key, val)
+ case int:
+ return Int(key, val)
+ case *int:
+ return Intp(key, val)
+ case []int:
+ return Ints(key, val)
+ case int64:
+ return Int64(key, val)
+ case *int64:
+ return Int64p(key, val)
+ case []int64:
+ return Int64s(key, val)
+ case int32:
+ return Int32(key, val)
+ case *int32:
+ return Int32p(key, val)
+ case []int32:
+ return Int32s(key, val)
+ case int16:
+ return Int16(key, val)
+ case *int16:
+ return Int16p(key, val)
+ case []int16:
+ return Int16s(key, val)
+ case int8:
+ return Int8(key, val)
+ case *int8:
+ return Int8p(key, val)
+ case []int8:
+ return Int8s(key, val)
+ case string:
+ return String(key, val)
+ case *string:
+ return Stringp(key, val)
+ case []string:
+ return Strings(key, val)
+ case uint:
+ return Uint(key, val)
+ case *uint:
+ return Uintp(key, val)
+ case []uint:
+ return Uints(key, val)
+ case uint64:
+ return Uint64(key, val)
+ case *uint64:
+ return Uint64p(key, val)
+ case []uint64:
+ return Uint64s(key, val)
+ case uint32:
+ return Uint32(key, val)
+ case *uint32:
+ return Uint32p(key, val)
+ case []uint32:
+ return Uint32s(key, val)
+ case uint16:
+ return Uint16(key, val)
+ case *uint16:
+ return Uint16p(key, val)
+ case []uint16:
+ return Uint16s(key, val)
+ case uint8:
+ return Uint8(key, val)
+ case *uint8:
+ return Uint8p(key, val)
+ case []byte:
+ return Binary(key, val)
+ case uintptr:
+ return Uintptr(key, val)
+ case *uintptr:
+ return Uintptrp(key, val)
+ case []uintptr:
+ return Uintptrs(key, val)
+ case time.Time:
+ return Time(key, val)
+ case *time.Time:
+ return Timep(key, val)
+ case []time.Time:
+ return Times(key, val)
+ case time.Duration:
+ return Duration(key, val)
+ case *time.Duration:
+ return Durationp(key, val)
+ case []time.Duration:
+ return Durations(key, val)
+ case error:
+ return NamedError(key, val)
+ case []error:
+ return Errors(key, val)
+ case fmt.Stringer:
+ return Stringer(key, val)
+ default:
+ return Reflect(key, val)
+ }
+}
diff --git a/vendor/go.uber.org/zap/flag.go b/vendor/go.uber.org/zap/flag.go
new file mode 100644
index 0000000..1312875
--- /dev/null
+++ b/vendor/go.uber.org/zap/flag.go
@@ -0,0 +1,39 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "flag"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// LevelFlag uses the standard library's flag.Var to declare a global flag
+// with the specified name, default, and usage guidance. The returned value is
+// a pointer to the value of the flag.
+//
+// If you don't want to use the flag package's global state, you can use any
+// non-nil *Level as a flag.Value with your own *flag.FlagSet.
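+//
+// For example (a sketch; the flag name is illustrative):
+// lvl := zap.LevelFlag("log-level", zapcore.InfoLevel, "minimum enabled logging level")
+// flag.Parse()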
+func LevelFlag(name string, defaultLevel zapcore.Level, usage string) *zapcore.Level {
+ lvl := defaultLevel
+ flag.Var(&lvl, name, usage)
+ return &lvl
+}
diff --git a/vendor/go.uber.org/zap/glide.yaml b/vendor/go.uber.org/zap/glide.yaml
new file mode 100644
index 0000000..8e1d05e
--- /dev/null
+++ b/vendor/go.uber.org/zap/glide.yaml
@@ -0,0 +1,34 @@
+package: go.uber.org/zap
+license: MIT
+import:
+- package: go.uber.org/atomic
+ version: ^1
+- package: go.uber.org/multierr
+ version: ^1
+testImport:
+- package: github.com/satori/go.uuid
+- package: github.com/sirupsen/logrus
+- package: github.com/apex/log
+ subpackages:
+ - handlers/json
+- package: github.com/go-kit/kit
+ subpackages:
+ - log
+- package: github.com/stretchr/testify
+ subpackages:
+ - assert
+ - require
+- package: gopkg.in/inconshreveable/log15.v2
+- package: github.com/mattn/goveralls
+- package: github.com/pborman/uuid
+- package: github.com/pkg/errors
+- package: github.com/rs/zerolog
+- package: golang.org/x/tools
+ subpackages:
+ - cover
+- package: golang.org/x/lint
+ subpackages:
+ - golint
+- package: github.com/axw/gocov
+ subpackages:
+ - gocov
diff --git a/vendor/go.uber.org/zap/global.go b/vendor/go.uber.org/zap/global.go
new file mode 100644
index 0000000..c1ac050
--- /dev/null
+++ b/vendor/go.uber.org/zap/global.go
@@ -0,0 +1,168 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "os"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ _loggerWriterDepth = 2
+ _programmerErrorTemplate = "You've found a bug in zap! Please file a bug at " +
+ "https://github.com/uber-go/zap/issues/new and reference this error: %v"
+)
+
+var (
+ _globalMu sync.RWMutex
+ _globalL = NewNop()
+ _globalS = _globalL.Sugar()
+)
+
+// L returns the global Logger, which can be reconfigured with ReplaceGlobals.
+// It's safe for concurrent use.
+func L() *Logger {
+ _globalMu.RLock()
+ l := _globalL
+ _globalMu.RUnlock()
+ return l
+}
+
+// S returns the global SugaredLogger, which can be reconfigured with
+// ReplaceGlobals. It's safe for concurrent use.
+func S() *SugaredLogger {
+ _globalMu.RLock()
+ s := _globalS
+ _globalMu.RUnlock()
+ return s
+}
+
+// ReplaceGlobals replaces the global Logger and SugaredLogger, and returns a
+// function to restore the original values. It's safe for concurrent use.
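+//
+// For example (a sketch):
+// undo := zap.ReplaceGlobals(logger)
+// defer undo()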
+func ReplaceGlobals(logger *Logger) func() {
+ _globalMu.Lock()
+ prev := _globalL
+ _globalL = logger
+ _globalS = logger.Sugar()
+ _globalMu.Unlock()
+ return func() { ReplaceGlobals(prev) }
+}
+
+// NewStdLog returns a *log.Logger which writes to the supplied zap Logger at
+// InfoLevel. To redirect the standard library's package-global logging
+// functions, use RedirectStdLog instead.
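+//
+// For example (a sketch):
+// std := zap.NewStdLog(logger)
+// std.Print("printed via the standard library, logged at InfoLevel")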
+func NewStdLog(l *Logger) *log.Logger {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ f := logger.Info
+ return log.New(&loggerWriter{f}, "" /* prefix */, 0 /* flags */)
+}
+
+// NewStdLogAt returns a *log.Logger which writes to the supplied zap Logger
+// at the required level.
+func NewStdLogAt(l *Logger, level zapcore.Level) (*log.Logger, error) {
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ return log.New(&loggerWriter{logFunc}, "" /* prefix */, 0 /* flags */), nil
+}
+
+// RedirectStdLog redirects output from the standard library's package-global
+// logger to the supplied logger at InfoLevel. Since zap already handles caller
+// annotations, timestamps, etc., it automatically disables the standard
+// library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
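+//
+// For example (a sketch):
+// undo := zap.RedirectStdLog(logger)
+// defer undo()
+// log.Print("forwarded to the zap logger at InfoLevel")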
+func RedirectStdLog(l *Logger) func() {
+ f, err := redirectStdLogAt(l, InfoLevel)
+ if err != nil {
+ // Can't get here, since passing InfoLevel to redirectStdLogAt always
+ // works.
+ panic(fmt.Sprintf(_programmerErrorTemplate, err))
+ }
+ return f
+}
+
+// RedirectStdLogAt redirects output from the standard library's package-global
+// logger to the supplied logger at the specified level. Since zap already
+// handles caller annotations, timestamps, etc., it automatically disables the
+// standard library's annotations and prefixing.
+//
+// It returns a function to restore the original prefix and flags and reset the
+// standard library's output to os.Stderr.
+func RedirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ return redirectStdLogAt(l, level)
+}
+
+func redirectStdLogAt(l *Logger, level zapcore.Level) (func(), error) {
+ flags := log.Flags()
+ prefix := log.Prefix()
+ log.SetFlags(0)
+ log.SetPrefix("")
+ logger := l.WithOptions(AddCallerSkip(_stdLogDefaultDepth + _loggerWriterDepth))
+ logFunc, err := levelToFunc(logger, level)
+ if err != nil {
+ return nil, err
+ }
+ log.SetOutput(&loggerWriter{logFunc})
+ return func() {
+ log.SetFlags(flags)
+ log.SetPrefix(prefix)
+ log.SetOutput(os.Stderr)
+ }, nil
+}
+
+func levelToFunc(logger *Logger, lvl zapcore.Level) (func(string, ...Field), error) {
+ switch lvl {
+ case DebugLevel:
+ return logger.Debug, nil
+ case InfoLevel:
+ return logger.Info, nil
+ case WarnLevel:
+ return logger.Warn, nil
+ case ErrorLevel:
+ return logger.Error, nil
+ case DPanicLevel:
+ return logger.DPanic, nil
+ case PanicLevel:
+ return logger.Panic, nil
+ case FatalLevel:
+ return logger.Fatal, nil
+ }
+ return nil, fmt.Errorf("unrecognized level: %q", lvl)
+}
+
+type loggerWriter struct {
+ logFunc func(msg string, fields ...Field)
+}
+
+func (l *loggerWriter) Write(p []byte) (int, error) {
+ p = bytes.TrimSpace(p)
+ l.logFunc(string(p))
+ return len(p), nil
+}
diff --git a/vendor/go.uber.org/zap/global_go112.go b/vendor/go.uber.org/zap/global_go112.go
new file mode 100644
index 0000000..6b5dbda
--- /dev/null
+++ b/vendor/go.uber.org/zap/global_go112.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// See #682 for more information.
+// +build go1.12
+
+package zap
+
+const _stdLogDefaultDepth = 1
diff --git a/vendor/go.uber.org/zap/global_prego112.go b/vendor/go.uber.org/zap/global_prego112.go
new file mode 100644
index 0000000..d3ab9af
--- /dev/null
+++ b/vendor/go.uber.org/zap/global_prego112.go
@@ -0,0 +1,26 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// See #682 for more information.
+// +build !go1.12
+
+package zap
+
+const _stdLogDefaultDepth = 2
diff --git a/vendor/go.uber.org/zap/go.mod b/vendor/go.uber.org/zap/go.mod
new file mode 100644
index 0000000..9455c99
--- /dev/null
+++ b/vendor/go.uber.org/zap/go.mod
@@ -0,0 +1,14 @@
+module go.uber.org/zap
+
+go 1.13
+
+require (
+ github.com/benbjohnson/clock v1.1.0
+ github.com/pkg/errors v0.8.1
+ github.com/stretchr/testify v1.7.0
+ go.uber.org/atomic v1.7.0
+ go.uber.org/goleak v1.1.10
+ go.uber.org/multierr v1.6.0
+ gopkg.in/yaml.v2 v2.2.8
+ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+)
diff --git a/vendor/go.uber.org/zap/go.sum b/vendor/go.uber.org/zap/go.sum
new file mode 100644
index 0000000..b330071
--- /dev/null
+++ b/vendor/go.uber.org/zap/go.sum
@@ -0,0 +1,54 @@
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0 h1:4G4v2dO3VZwixGIRoQ5Lfboy6nUhCyYzaqnIAPPhYs4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11 h1:Yq9t9jnGoR+dBuitxdo9l6Q7xh/zOyNnYUtDKaQ3x0E=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/vendor/go.uber.org/zap/http_handler.go b/vendor/go.uber.org/zap/http_handler.go
new file mode 100644
index 0000000..1297c33
--- /dev/null
+++ b/vendor/go.uber.org/zap/http_handler.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// ServeHTTP serves a simple JSON endpoint that can report on or change the
+// current logging level.
+//
+// GET
+//
+// The GET request returns a JSON description of the current logging level like:
+// {"level":"info"}
+//
+// PUT
+//
+// The PUT request changes the logging level. It is perfectly safe to change the
+// logging level while a program is running. Two content types are supported:
+//
+// Content-Type: application/x-www-form-urlencoded
+//
+// With this content type, the level can be provided through the request body or
+// a query parameter. The log level is URL encoded like:
+//
+// level=debug
+//
+// The request body takes precedence over the query parameter, if both are
+// specified.
+//
+// This content type is the default for a curl PUT request. Following are two
+// example curl requests that both set the logging level to debug.
+//
+// curl -X PUT localhost:8080/log/level?level=debug
+// curl -X PUT localhost:8080/log/level -d level=debug
+//
+// For any other content type, the payload is expected to be JSON encoded and
+// look like:
+//
+// {"level":"info"}
+//
+// An example curl request could look like this:
+//
+// curl -X PUT localhost:8080/log/level -H "Content-Type: application/json" -d '{"level":"debug"}'
+//
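+// To expose the endpoint, mount the AtomicLevel on an HTTP mux (a sketch;
+// the path is illustrative):
+//
+// http.Handle("/log/level", atom)
+//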
+func (lvl AtomicLevel) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ type errorResponse struct {
+ Error string `json:"error"`
+ }
+ type payload struct {
+ Level zapcore.Level `json:"level"`
+ }
+
+ enc := json.NewEncoder(w)
+
+ switch r.Method {
+ case http.MethodGet:
+ enc.Encode(payload{Level: lvl.Level()})
+ case http.MethodPut:
+ requestedLvl, err := decodePutRequest(r.Header.Get("Content-Type"), r)
+ if err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ enc.Encode(errorResponse{Error: err.Error()})
+ return
+ }
+ lvl.SetLevel(requestedLvl)
+ enc.Encode(payload{Level: lvl.Level()})
+ default:
+ w.WriteHeader(http.StatusMethodNotAllowed)
+ enc.Encode(errorResponse{
+ Error: "Only GET and PUT are supported.",
+ })
+ }
+}
+
+// decodePutRequest decodes an incoming PUT request and returns the requested
+// logging level.
+func decodePutRequest(contentType string, r *http.Request) (zapcore.Level, error) {
+ if contentType == "application/x-www-form-urlencoded" {
+ return decodePutURL(r)
+ }
+ return decodePutJSON(r.Body)
+}
+
+func decodePutURL(r *http.Request) (zapcore.Level, error) {
+ lvl := r.FormValue("level")
+ if lvl == "" {
+ return 0, fmt.Errorf("must specify logging level")
+ }
+ var l zapcore.Level
+ if err := l.UnmarshalText([]byte(lvl)); err != nil {
+ return 0, err
+ }
+ return l, nil
+}
+
+func decodePutJSON(body io.Reader) (zapcore.Level, error) {
+ var pld struct {
+ Level *zapcore.Level `json:"level"`
+ }
+ if err := json.NewDecoder(body).Decode(&pld); err != nil {
+ return 0, fmt.Errorf("malformed request body: %v", err)
+ }
+ if pld.Level == nil {
+ return 0, fmt.Errorf("must specify logging level")
+ }
+ return *pld.Level, nil
+}
diff --git a/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
new file mode 100644
index 0000000..dad583a
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/bufferpool/bufferpool.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package bufferpool houses zap's shared internal buffer pool. Third-party
+// packages can recreate the same functionality with buffers.NewPool.
+package bufferpool
+
+import "go.uber.org/zap/buffer"
+
+var (
+ _pool = buffer.NewPool()
+ // Get retrieves a buffer from the pool, creating one if necessary.
+ Get = _pool.Get
+)
diff --git a/vendor/go.uber.org/zap/internal/color/color.go b/vendor/go.uber.org/zap/internal/color/color.go
new file mode 100644
index 0000000..c4d5d02
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/color/color.go
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package color adds coloring functionality for TTY output.
+package color
+
+import "fmt"
+
+// Foreground colors.
+const (
+ Black Color = iota + 30
+ Red
+ Green
+ Yellow
+ Blue
+ Magenta
+ Cyan
+ White
+)
+
+// Color represents a text color.
+type Color uint8
+
+// Add adds the coloring to the given string.
+func (c Color) Add(s string) string {
+ return fmt.Sprintf("\x1b[%dm%s\x1b[0m", uint8(c), s)
+}
diff --git a/vendor/go.uber.org/zap/internal/exit/exit.go b/vendor/go.uber.org/zap/internal/exit/exit.go
new file mode 100644
index 0000000..dfc5b05
--- /dev/null
+++ b/vendor/go.uber.org/zap/internal/exit/exit.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package exit provides stubs so that unit tests can exercise code that calls
+// os.Exit(1).
+package exit
+
+import "os"
+
+var real = func() { os.Exit(1) }
+
+// Exit normally terminates the process by calling os.Exit(1). If the package
+// is stubbed, it instead records a call in the testing spy.
+func Exit() {
+ real()
+}
+
+// A StubbedExit is a testing fake for os.Exit.
+type StubbedExit struct {
+ Exited bool
+ prev func()
+}
+
+// Stub substitutes a fake for the call to os.Exit(1).
+func Stub() *StubbedExit {
+ s := &StubbedExit{prev: real}
+ real = s.exit
+ return s
+}
+
+// WithStub runs the supplied function with Exit stubbed. It returns the stub
+// used, so that users can test whether the process would have crashed.
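+//
+// For example (a sketch):
+// s := exit.WithStub(func() { exit.Exit() })
+// fmt.Println(s.Exited) // true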
+func WithStub(f func()) *StubbedExit {
+ s := Stub()
+ defer s.Unstub()
+ f()
+ return s
+}
+
+// Unstub restores the previous exit function.
+func (se *StubbedExit) Unstub() {
+ real = se.prev
+}
+
+func (se *StubbedExit) exit() {
+ se.Exited = true
+}
diff --git a/vendor/go.uber.org/zap/level.go b/vendor/go.uber.org/zap/level.go
new file mode 100644
index 0000000..3567a9a
--- /dev/null
+++ b/vendor/go.uber.org/zap/level.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "go.uber.org/atomic"
+ "go.uber.org/zap/zapcore"
+)
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel = zapcore.DebugLevel
+ // InfoLevel is the default logging priority.
+ InfoLevel = zapcore.InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel = zapcore.WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel = zapcore.ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel = zapcore.DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel = zapcore.PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel = zapcore.FatalLevel
+)
+
+// LevelEnablerFunc is a convenient way to implement zapcore.LevelEnabler with
+// an anonymous function.
+//
+// It's particularly useful when splitting log output between different
+// outputs (e.g., standard error and standard out). For sample code, see the
+// package-level AdvancedConfiguration example.
+type LevelEnablerFunc func(zapcore.Level) bool
+
+// Enabled calls the wrapped function.
+func (f LevelEnablerFunc) Enabled(lvl zapcore.Level) bool { return f(lvl) }
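
As the comment above suggests, LevelEnablerFunc is handy for splitting output by level. A minimal sketch using the public zap and zapcore helpers (destinations and messages are illustrative):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())

	// Route warnings and above to stderr, everything else to stdout.
	highPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl >= zapcore.WarnLevel
	})
	lowPriority := zap.LevelEnablerFunc(func(lvl zapcore.Level) bool {
		return lvl < zapcore.WarnLevel
	})

	core := zapcore.NewTee(
		zapcore.NewCore(enc, zapcore.Lock(os.Stderr), highPriority),
		zapcore.NewCore(enc, zapcore.Lock(os.Stdout), lowPriority),
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("routed to stdout")
	logger.Warn("routed to stderr")
}
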
+
+// An AtomicLevel is an atomically changeable, dynamic logging level. It lets
+// you safely change the log level of a tree of loggers (the root logger and
+// any children created by adding context) at runtime.
+//
+// The AtomicLevel itself is an http.Handler that serves a JSON endpoint to
+// alter its level.
+//
+// AtomicLevels must be created with the NewAtomicLevel constructor to allocate
+// their internal atomic pointer.
+type AtomicLevel struct {
+ l *atomic.Int32
+}
+
+// NewAtomicLevel creates an AtomicLevel with InfoLevel and above logging
+// enabled.
+func NewAtomicLevel() AtomicLevel {
+ return AtomicLevel{
+ l: atomic.NewInt32(int32(InfoLevel)),
+ }
+}
+
+// NewAtomicLevelAt is a convenience function that creates an AtomicLevel
+// and then calls SetLevel with the given level.
+func NewAtomicLevelAt(l zapcore.Level) AtomicLevel {
+ a := NewAtomicLevel()
+ a.SetLevel(l)
+ return a
+}
+
+// Enabled implements the zapcore.LevelEnabler interface, which allows the
+// AtomicLevel to be used in place of traditional static levels.
+func (lvl AtomicLevel) Enabled(l zapcore.Level) bool {
+ return lvl.Level().Enabled(l)
+}
+
+// Level returns the minimum enabled log level.
+func (lvl AtomicLevel) Level() zapcore.Level {
+ return zapcore.Level(int8(lvl.l.Load()))
+}
+
+// SetLevel alters the logging level.
+func (lvl AtomicLevel) SetLevel(l zapcore.Level) {
+ lvl.l.Store(int32(l))
+}
+
+// String returns the string representation of the underlying Level.
+func (lvl AtomicLevel) String() string {
+ return lvl.Level().String()
+}
+
+// UnmarshalText unmarshals the text to an AtomicLevel. It uses the same text
+// representations as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl *AtomicLevel) UnmarshalText(text []byte) error {
+ if lvl.l == nil {
+ lvl.l = &atomic.Int32{}
+ }
+
+ var l zapcore.Level
+ if err := l.UnmarshalText(text); err != nil {
+ return err
+ }
+
+ lvl.SetLevel(l)
+ return nil
+}
+
+// MarshalText marshals the AtomicLevel to a byte slice. It uses the same
+// text representation as the static zapcore.Levels ("debug", "info", "warn",
+// "error", "dpanic", "panic", and "fatal").
+func (lvl AtomicLevel) MarshalText() (text []byte, err error) {
+ return lvl.Level().MarshalText()
+}
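
A short sketch of changing the level at runtime with AtomicLevel (output destination and messages are illustrative):

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	atom := zap.NewAtomicLevelAt(zap.InfoLevel)
	logger := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.Lock(os.Stdout),
		atom,
	))
	defer logger.Sync()

	logger.Debug("dropped: below the current level")
	atom.SetLevel(zap.DebugLevel) // safe to call concurrently
	logger.Debug("now visible")

	// AtomicLevel also implements http.Handler, so the same value can be
	// mounted to change the level over HTTP, e.g. http.Handle("/log/level", atom).
}
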
diff --git a/vendor/go.uber.org/zap/logger.go b/vendor/go.uber.org/zap/logger.go
new file mode 100644
index 0000000..f116bd9
--- /dev/null
+++ b/vendor/go.uber.org/zap/logger.go
@@ -0,0 +1,348 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "runtime"
+ "strings"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// A Logger provides fast, leveled, structured logging. All methods are safe
+// for concurrent use.
+//
+// The Logger is designed for contexts in which every microsecond and every
+// allocation matters, so its API intentionally favors performance and type
+// safety over brevity. For most applications, the SugaredLogger strikes a
+// better balance between performance and ergonomics.
+type Logger struct {
+ core zapcore.Core
+
+ development bool
+ addCaller bool
+ onFatal zapcore.CheckWriteAction // default is WriteThenFatal
+
+ name string
+ errorOutput zapcore.WriteSyncer
+
+ addStack zapcore.LevelEnabler
+
+ callerSkip int
+
+ clock zapcore.Clock
+}
+
+// New constructs a new Logger from the provided zapcore.Core and Options. If
+// the passed zapcore.Core is nil, it falls back to using a no-op
+// implementation.
+//
+// This is the most flexible way to construct a Logger, but also the most
+// verbose. For typical use cases, the highly-opinionated presets
+// (NewProduction, NewDevelopment, and NewExample) or the Config struct are
+// more convenient.
+//
+// For sample code, see the package-level AdvancedConfiguration example.
+func New(core zapcore.Core, options ...Option) *Logger {
+ if core == nil {
+ return NewNop()
+ }
+ log := &Logger{
+ core: core,
+ errorOutput: zapcore.Lock(os.Stderr),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+ return log.WithOptions(options...)
+}
+
+// NewNop returns a no-op Logger. It never writes out logs or internal errors,
+// and it never runs user-defined hooks.
+//
+// Using WithOptions to replace the Core or error output of a no-op Logger can
+// re-enable logging.
+func NewNop() *Logger {
+ return &Logger{
+ core: zapcore.NewNopCore(),
+ errorOutput: zapcore.AddSync(ioutil.Discard),
+ addStack: zapcore.FatalLevel + 1,
+ clock: zapcore.DefaultClock,
+ }
+}
+
+// NewProduction builds a sensible production Logger that writes InfoLevel and
+// above logs to standard error as JSON.
+//
+// It's a shortcut for NewProductionConfig().Build(...Option).
+func NewProduction(options ...Option) (*Logger, error) {
+ return NewProductionConfig().Build(options...)
+}
+
+// NewDevelopment builds a development Logger that writes DebugLevel and above
+// logs to standard error in a human-friendly format.
+//
+// It's a shortcut for NewDevelopmentConfig().Build(...Option).
+func NewDevelopment(options ...Option) (*Logger, error) {
+ return NewDevelopmentConfig().Build(options...)
+}
+
+// NewExample builds a Logger that's designed for use in zap's testable
+// examples. It writes DebugLevel and above logs to standard out as JSON, but
+// omits the timestamp and calling function to keep example output
+// short and deterministic.
+func NewExample(options ...Option) *Logger {
+ encoderCfg := zapcore.EncoderConfig{
+ MessageKey: "msg",
+ LevelKey: "level",
+ NameKey: "logger",
+ EncodeLevel: zapcore.LowercaseLevelEncoder,
+ EncodeTime: zapcore.ISO8601TimeEncoder,
+ EncodeDuration: zapcore.StringDurationEncoder,
+ }
+ core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderCfg), os.Stdout, DebugLevel)
+ return New(core).WithOptions(options...)
+}
+
+// Sugar wraps the Logger to provide a more ergonomic, but slightly slower,
+// API. Sugaring a Logger is quite inexpensive, so it's reasonable for a
+// single application to use both Loggers and SugaredLoggers, converting
+// between them on the boundaries of performance-sensitive code.
+func (log *Logger) Sugar() *SugaredLogger {
+ core := log.clone()
+ core.callerSkip += 2
+ return &SugaredLogger{core}
+}
+
+// Named adds a new path segment to the logger's name. Segments are joined by
+// periods. By default, Loggers are unnamed.
+func (log *Logger) Named(s string) *Logger {
+ if s == "" {
+ return log
+ }
+ l := log.clone()
+ if log.name == "" {
+ l.name = s
+ } else {
+ l.name = strings.Join([]string{l.name, s}, ".")
+ }
+ return l
+}
+
+// WithOptions clones the current Logger, applies the supplied Options, and
+// returns the resulting Logger. It's safe to use concurrently.
+func (log *Logger) WithOptions(opts ...Option) *Logger {
+ c := log.clone()
+ for _, opt := range opts {
+ opt.apply(c)
+ }
+ return c
+}
+
+// With creates a child logger and adds structured context to it. Fields added
+// to the child don't affect the parent, and vice versa.
+func (log *Logger) With(fields ...Field) *Logger {
+ if len(fields) == 0 {
+ return log
+ }
+ l := log.clone()
+ l.core = l.core.With(fields)
+ return l
+}
+
+// Check returns a CheckedEntry if logging a message at the specified level
+// is enabled. It's a completely optional optimization; in high-performance
+// applications, Check can help avoid allocating a slice to hold fields.
+func (log *Logger) Check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ return log.check(lvl, msg)
+}
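
A sketch of the pattern the comment describes; buildExpensiveDump is a hypothetical stand-in for work that should only run when the level is enabled:

package main

import "go.uber.org/zap"

// buildExpensiveDump stands in for costly diagnostics (hypothetical).
func buildExpensiveDump() string { return "..." }

func logDump(logger *zap.Logger) {
	if ce := logger.Check(zap.DebugLevel, "state dump"); ce != nil {
		// Fields are only assembled when debug logging is enabled.
		ce.Write(zap.String("dump", buildExpensiveDump()))
	}
}

func main() {
	logDump(zap.NewExample())
}
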
+
+// Debug logs a message at DebugLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Debug(msg string, fields ...Field) {
+ if ce := log.check(DebugLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Info logs a message at InfoLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Info(msg string, fields ...Field) {
+ if ce := log.check(InfoLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Warn logs a message at WarnLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Warn(msg string, fields ...Field) {
+ if ce := log.check(WarnLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Error logs a message at ErrorLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+func (log *Logger) Error(msg string, fields ...Field) {
+ if ce := log.check(ErrorLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// DPanic logs a message at DPanicLevel. The message includes any fields
+// passed at the log site, as well as any fields accumulated on the logger.
+//
+// If the logger is in development mode, it then panics (DPanic means
+// "development panic"). This is useful for catching errors that are
+// recoverable, but shouldn't ever happen.
+func (log *Logger) DPanic(msg string, fields ...Field) {
+ if ce := log.check(DPanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Panic logs a message at PanicLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then panics, even if logging at PanicLevel is disabled.
+func (log *Logger) Panic(msg string, fields ...Field) {
+ if ce := log.check(PanicLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Fatal logs a message at FatalLevel. The message includes any fields passed
+// at the log site, as well as any fields accumulated on the logger.
+//
+// The logger then calls os.Exit(1), even if logging at FatalLevel is
+// disabled.
+func (log *Logger) Fatal(msg string, fields ...Field) {
+ if ce := log.check(FatalLevel, msg); ce != nil {
+ ce.Write(fields...)
+ }
+}
+
+// Sync calls the underlying Core's Sync method, flushing any buffered log
+// entries. Applications should take care to call Sync before exiting.
+func (log *Logger) Sync() error {
+ return log.core.Sync()
+}
+
+// Core returns the Logger's underlying zapcore.Core.
+func (log *Logger) Core() zapcore.Core {
+ return log.core
+}
+
+func (log *Logger) clone() *Logger {
+ copy := *log
+ return &copy
+}
+
+func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
+ // check must always be called directly by a method in the Logger interface
+ // (e.g., Check, Info, Fatal).
+ const callerSkipOffset = 2
+
+ // Check the level first to reduce the cost of disabled log calls.
+ // Since Panic and higher may exit, we skip the optimization for those levels.
+ if lvl < zapcore.DPanicLevel && !log.core.Enabled(lvl) {
+ return nil
+ }
+
+ // Create a basic checked entry through the core; this will be non-nil if
+ // the log message will actually be written somewhere.
+ ent := zapcore.Entry{
+ LoggerName: log.name,
+ Time: log.clock.Now(),
+ Level: lvl,
+ Message: msg,
+ }
+ ce := log.core.Check(ent, nil)
+ willWrite := ce != nil
+
+ // Set up any required terminal behavior.
+ switch ent.Level {
+ case zapcore.PanicLevel:
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ case zapcore.FatalLevel:
+ onFatal := log.onFatal
+ // WriteThenNoop is the default CheckWriteAction, and it leads to
+ // continued execution after a Fatal, which is unexpected.
+ if onFatal == zapcore.WriteThenNoop {
+ onFatal = zapcore.WriteThenFatal
+ }
+ ce = ce.Should(ent, onFatal)
+ case zapcore.DPanicLevel:
+ if log.development {
+ ce = ce.Should(ent, zapcore.WriteThenPanic)
+ }
+ }
+
+ // Only do further annotation if we're going to write this message; checked
+ // entries that exist only for terminal behavior don't benefit from
+ // annotation.
+ if !willWrite {
+ return ce
+ }
+
+ // Thread the error output through to the CheckedEntry.
+ ce.ErrorOutput = log.errorOutput
+ if log.addCaller {
+ frame, defined := getCallerFrame(log.callerSkip + callerSkipOffset)
+ if !defined {
+ fmt.Fprintf(log.errorOutput, "%v Logger.check error: failed to get caller\n", ent.Time.UTC())
+ log.errorOutput.Sync()
+ }
+
+ ce.Entry.Caller = zapcore.EntryCaller{
+ Defined: defined,
+ PC: frame.PC,
+ File: frame.File,
+ Line: frame.Line,
+ Function: frame.Function,
+ }
+ }
+ if log.addStack.Enabled(ce.Entry.Level) {
+ ce.Entry.Stack = StackSkip("", log.callerSkip+callerSkipOffset).String
+ }
+
+ return ce
+}
+
+// getCallerFrame returns the caller's stack frame. The argument skip is the
+// number of stack frames to ascend, with 0 identifying the caller of
+// getCallerFrame. The boolean ok is false if it was not possible to recover
+// the information.
+//
+// Note: This implementation is similar to runtime.Caller, but it returns the whole frame.
+func getCallerFrame(skip int) (frame runtime.Frame, ok bool) {
+ const skipOffset = 2 // skip getCallerFrame and Callers
+
+ pc := make([]uintptr, 1)
+ numFrames := runtime.Callers(skip+skipOffset, pc)
+ if numFrames < 1 {
+ return
+ }
+
+ frame, _ = runtime.CallersFrames(pc).Next()
+ return frame, frame.PC != 0
+}
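
Putting the pieces above together, a minimal end-to-end sketch using the NewProduction preset (the logger name and field values are illustrative):

package main

import "go.uber.org/zap"

func main() {
	logger, err := zap.NewProduction()
	if err != nil {
		panic(err)
	}
	defer logger.Sync() // flush any buffered entries before exiting

	logger = logger.Named("ingest").With(zap.String("region", "us-east-1"))
	logger.Info("pipeline started", zap.Int("workers", 4))
}
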
diff --git a/vendor/go.uber.org/zap/options.go b/vendor/go.uber.org/zap/options.go
new file mode 100644
index 0000000..e9e6616
--- /dev/null
+++ b/vendor/go.uber.org/zap/options.go
@@ -0,0 +1,148 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+)
+
+// An Option configures a Logger.
+type Option interface {
+ apply(*Logger)
+}
+
+// optionFunc wraps a func so it satisfies the Option interface.
+type optionFunc func(*Logger)
+
+func (f optionFunc) apply(log *Logger) {
+ f(log)
+}
+
+// WrapCore wraps or replaces the Logger's underlying zapcore.Core.
+func WrapCore(f func(zapcore.Core) zapcore.Core) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = f(log.core)
+ })
+}
+
+// Hooks registers functions which will be called each time the Logger writes
+// out an Entry. Repeated use of Hooks is additive.
+//
+// Hooks are useful for simple side effects, like capturing metrics for the
+// number of emitted logs. More complex side effects, including anything that
+// requires access to the Entry's structured fields, should be implemented as
+// a zapcore.Core instead. See zapcore.RegisterHooks for details.
+func Hooks(hooks ...func(zapcore.Entry) error) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = zapcore.RegisterHooks(log.core, hooks...)
+ })
+}
+
+// Fields adds fields to the Logger.
+func Fields(fs ...Field) Option {
+ return optionFunc(func(log *Logger) {
+ log.core = log.core.With(fs)
+ })
+}
+
+// ErrorOutput sets the destination for errors generated by the Logger. Note
+// that this option only affects internal errors; for sample code that sends
+// error-level logs to a different location from info- and debug-level logs,
+// see the package-level AdvancedConfiguration example.
+//
+// The supplied WriteSyncer must be safe for concurrent use. The Open and
+// zapcore.Lock functions are the simplest ways to protect files with a mutex.
+func ErrorOutput(w zapcore.WriteSyncer) Option {
+ return optionFunc(func(log *Logger) {
+ log.errorOutput = w
+ })
+}
+
+// Development puts the logger in development mode, which makes DPanic-level
+// logs panic instead of simply logging an error.
+func Development() Option {
+ return optionFunc(func(log *Logger) {
+ log.development = true
+ })
+}
+
+// AddCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller. See also WithCaller.
+func AddCaller() Option {
+ return WithCaller(true)
+}
+
+// WithCaller configures the Logger to annotate each message with the filename,
+// line number, and function name of zap's caller, or not, depending on the
+// value of enabled. This is a generalized form of AddCaller.
+func WithCaller(enabled bool) Option {
+ return optionFunc(func(log *Logger) {
+ log.addCaller = enabled
+ })
+}
+
+// AddCallerSkip increases the number of callers skipped by caller annotation
+// (as enabled by the AddCaller option). When building wrappers around the
+// Logger and SugaredLogger, supplying this Option prevents zap from always
+// reporting the wrapper code as the caller.
+func AddCallerSkip(skip int) Option {
+ return optionFunc(func(log *Logger) {
+ log.callerSkip += skip
+ })
+}
+
+// AddStacktrace configures the Logger to record a stack trace for all messages at
+// or above a given level.
+func AddStacktrace(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ log.addStack = lvl
+ })
+}
+
+// IncreaseLevel increases the level of the logger. It has no effect if
+// the passed-in level tries to decrease the level of the logger.
+func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
+ return optionFunc(func(log *Logger) {
+ core, err := zapcore.NewIncreaseLevelCore(log.core, lvl)
+ if err != nil {
+ fmt.Fprintf(log.errorOutput, "failed to IncreaseLevel: %v\n", err)
+ } else {
+ log.core = core
+ }
+ })
+}
+
+// OnFatal sets the action to take on fatal logs.
+func OnFatal(action zapcore.CheckWriteAction) Option {
+ return optionFunc(func(log *Logger) {
+ log.onFatal = action
+ })
+}
+
+// WithClock specifies the clock used by the logger to determine the current
+// time for logged entries. Defaults to the system clock with time.Now.
+func WithClock(clock zapcore.Clock) Option {
+ return optionFunc(func(log *Logger) {
+ log.clock = clock
+ })
+}
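
A sketch combining several of these options; logThrough is a hypothetical wrapper showing why AddCallerSkip exists:

package main

import "go.uber.org/zap"

// logThrough wraps the logger; AddCallerSkip(1) makes zap report
// logThrough's caller rather than this line (hypothetical helper).
func logThrough(l *zap.Logger, msg string) {
	l.WithOptions(zap.AddCallerSkip(1)).Info(msg)
}

func main() {
	logger, err := zap.NewDevelopment(
		zap.AddStacktrace(zap.ErrorLevel),
		zap.Fields(zap.String("service", "checkout")),
	)
	if err != nil {
		panic(err)
	}
	defer logger.Sync()

	logThrough(logger, "hello from the wrapper's caller")
}
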
diff --git a/vendor/go.uber.org/zap/sink.go b/vendor/go.uber.org/zap/sink.go
new file mode 100644
index 0000000..df46fa8
--- /dev/null
+++ b/vendor/go.uber.org/zap/sink.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net/url"
+ "os"
+ "strings"
+ "sync"
+
+ "go.uber.org/zap/zapcore"
+)
+
+const schemeFile = "file"
+
+var (
+ _sinkMutex sync.RWMutex
+ _sinkFactories map[string]func(*url.URL) (Sink, error) // keyed by scheme
+)
+
+func init() {
+ resetSinkRegistry()
+}
+
+func resetSinkRegistry() {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ _sinkFactories = map[string]func(*url.URL) (Sink, error){
+ schemeFile: newFileSink,
+ }
+}
+
+// Sink defines the interface to write to and close logger destinations.
+type Sink interface {
+ zapcore.WriteSyncer
+ io.Closer
+}
+
+type nopCloserSink struct{ zapcore.WriteSyncer }
+
+func (nopCloserSink) Close() error { return nil }
+
+type errSinkNotFound struct {
+ scheme string
+}
+
+func (e *errSinkNotFound) Error() string {
+ return fmt.Sprintf("no sink found for scheme %q", e.scheme)
+}
+
+// RegisterSink registers a user-supplied factory for all sinks with a
+// particular scheme.
+//
+// All schemes must be ASCII, valid under section 3.1 of RFC 3986
+// (https://tools.ietf.org/html/rfc3986#section-3.1), and must not already
+// have a factory registered. Zap automatically registers a factory for the
+// "file" scheme.
+func RegisterSink(scheme string, factory func(*url.URL) (Sink, error)) error {
+ _sinkMutex.Lock()
+ defer _sinkMutex.Unlock()
+
+ if scheme == "" {
+ return errors.New("can't register a sink factory for empty string")
+ }
+ normalized, err := normalizeScheme(scheme)
+ if err != nil {
+ return fmt.Errorf("%q is not a valid scheme: %v", scheme, err)
+ }
+ if _, ok := _sinkFactories[normalized]; ok {
+ return fmt.Errorf("sink factory already registered for scheme %q", normalized)
+ }
+ _sinkFactories[normalized] = factory
+ return nil
+}
+
+func newSink(rawURL string) (Sink, error) {
+ u, err := url.Parse(rawURL)
+ if err != nil {
+ return nil, fmt.Errorf("can't parse %q as a URL: %v", rawURL, err)
+ }
+ if u.Scheme == "" {
+ u.Scheme = schemeFile
+ }
+
+ _sinkMutex.RLock()
+ factory, ok := _sinkFactories[u.Scheme]
+ _sinkMutex.RUnlock()
+ if !ok {
+ return nil, &errSinkNotFound{u.Scheme}
+ }
+ return factory(u)
+}
+
+func newFileSink(u *url.URL) (Sink, error) {
+ if u.User != nil {
+ return nil, fmt.Errorf("user and password not allowed with file URLs: got %v", u)
+ }
+ if u.Fragment != "" {
+ return nil, fmt.Errorf("fragments not allowed with file URLs: got %v", u)
+ }
+ if u.RawQuery != "" {
+ return nil, fmt.Errorf("query parameters not allowed with file URLs: got %v", u)
+ }
+ // Error messages are better if we check hostname and port separately.
+ if u.Port() != "" {
+ return nil, fmt.Errorf("ports not allowed with file URLs: got %v", u)
+ }
+ if hn := u.Hostname(); hn != "" && hn != "localhost" {
+ return nil, fmt.Errorf("file URLs must leave host empty or use localhost: got %v", u)
+ }
+ switch u.Path {
+ case "stdout":
+ return nopCloserSink{os.Stdout}, nil
+ case "stderr":
+ return nopCloserSink{os.Stderr}, nil
+ }
+ return os.OpenFile(u.Path, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0666)
+}
+
+func normalizeScheme(s string) (string, error) {
+ // https://tools.ietf.org/html/rfc3986#section-3.1
+ s = strings.ToLower(s)
+ if first := s[0]; 'a' > first || 'z' < first {
+ return "", errors.New("must start with a letter")
+ }
+ for i := 1; i < len(s); i++ { // iterate over bytes, not runes
+ c := s[i]
+ switch {
+ case 'a' <= c && c <= 'z':
+ continue
+ case '0' <= c && c <= '9':
+ continue
+ case c == '.' || c == '+' || c == '-':
+ continue
+ }
+ return "", fmt.Errorf("may not contain %q", c)
+ }
+ return s, nil
+}
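
A sketch of registering a custom sink; the "memory" scheme and memorySink type are made up for illustration:

package main

import (
	"bytes"
	"net/url"

	"go.uber.org/zap"
)

// memorySink collects output in memory (illustrative only).
type memorySink struct{ *bytes.Buffer }

func (memorySink) Close() error { return nil }
func (memorySink) Sync() error  { return nil }

func main() {
	sink := memorySink{new(bytes.Buffer)}
	if err := zap.RegisterSink("memory", func(*url.URL) (zap.Sink, error) {
		return sink, nil
	}); err != nil {
		panic(err)
	}

	ws, closeOut, err := zap.Open("memory://")
	if err != nil {
		panic(err)
	}
	defer closeOut()

	if _, err := ws.Write([]byte("captured\n")); err != nil {
		panic(err)
	}
}
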
diff --git a/vendor/go.uber.org/zap/stacktrace.go b/vendor/go.uber.org/zap/stacktrace.go
new file mode 100644
index 0000000..0cf8c1d
--- /dev/null
+++ b/vendor/go.uber.org/zap/stacktrace.go
@@ -0,0 +1,85 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "runtime"
+ "sync"
+
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var (
+ _stacktracePool = sync.Pool{
+ New: func() interface{} {
+ return newProgramCounters(64)
+ },
+ }
+)
+
+func takeStacktrace(skip int) string {
+ buffer := bufferpool.Get()
+ defer buffer.Free()
+ programCounters := _stacktracePool.Get().(*programCounters)
+ defer _stacktracePool.Put(programCounters)
+
+ var numFrames int
+ for {
+ // Skip the call to runtime.Callers and takeStacktrace so that the
+ // program counters start at the caller of takeStacktrace.
+ numFrames = runtime.Callers(skip+2, programCounters.pcs)
+ if numFrames < len(programCounters.pcs) {
+ break
+ }
+ // Don't put the too-short counter slice back into the pool; this lets
+ // the pool adjust if we consistently take deep stacktraces.
+ programCounters = newProgramCounters(len(programCounters.pcs) * 2)
+ }
+
+ i := 0
+ frames := runtime.CallersFrames(programCounters.pcs[:numFrames])
+
+ // Note: On the last iteration, frames.Next() returns false, with a valid
+// frame, but we ignore this frame. The last frame is a runtime frame which
+ // adds noise, since it's only either runtime.main or runtime.goexit.
+ for frame, more := frames.Next(); more; frame, more = frames.Next() {
+ if i != 0 {
+ buffer.AppendByte('\n')
+ }
+ i++
+ buffer.AppendString(frame.Function)
+ buffer.AppendByte('\n')
+ buffer.AppendByte('\t')
+ buffer.AppendString(frame.File)
+ buffer.AppendByte(':')
+ buffer.AppendInt(int64(frame.Line))
+ }
+
+ return buffer.String()
+}
+
+type programCounters struct {
+ pcs []uintptr
+}
+
+func newProgramCounters(size int) *programCounters {
+ return &programCounters{make([]uintptr, size)}
+}
diff --git a/vendor/go.uber.org/zap/sugar.go b/vendor/go.uber.org/zap/sugar.go
new file mode 100644
index 0000000..0b96519
--- /dev/null
+++ b/vendor/go.uber.org/zap/sugar.go
@@ -0,0 +1,315 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ _oddNumberErrMsg = "Ignored key without a value."
+ _nonStringKeyErrMsg = "Ignored key-value pairs with non-string keys."
+)
+
+// A SugaredLogger wraps the base Logger functionality in a slower, but less
+// verbose, API. Any Logger can be converted to a SugaredLogger with its Sugar
+// method.
+//
+// Unlike the Logger, the SugaredLogger doesn't insist on structured logging.
+// For each log level, it exposes three methods: one for loosely-typed
+// structured logging, one for println-style formatting, and one for
+// printf-style formatting. For example, SugaredLoggers can produce InfoLevel
+// output with Infow ("info with" structured context), Info, or Infof.
+type SugaredLogger struct {
+ base *Logger
+}
+
+// Desugar unwraps a SugaredLogger, exposing the original Logger. Desugaring
+// is quite inexpensive, so it's reasonable for a single application to use
+// both Loggers and SugaredLoggers, converting between them on the boundaries
+// of performance-sensitive code.
+func (s *SugaredLogger) Desugar() *Logger {
+ base := s.base.clone()
+ base.callerSkip -= 2
+ return base
+}
+
+// Named adds a sub-scope to the logger's name. See Logger.Named for details.
+func (s *SugaredLogger) Named(name string) *SugaredLogger {
+ return &SugaredLogger{base: s.base.Named(name)}
+}
+
+// With adds a variadic number of fields to the logging context. It accepts a
+// mix of strongly-typed Field objects and loosely-typed key-value pairs. When
+// processing pairs, the first element of the pair is used as the field key
+// and the second as the field value.
+//
+// For example,
+// sugaredLogger.With(
+// "hello", "world",
+// "failure", errors.New("oh no"),
+// Stack(),
+// "count", 42,
+// "user", User{Name: "alice"},
+// )
+// is the equivalent of
+// unsugared.With(
+// String("hello", "world"),
+// String("failure", "oh no"),
+// Stack(),
+// Int("count", 42),
+// Object("user", User{Name: "alice"}),
+// )
+//
+// Note that the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics. In production, the logger is more
+// forgiving: a separate error is logged, but the key-value pair is skipped
+// and execution continues. Passing an orphaned key triggers similar behavior:
+// panics in development and errors in production.
+func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
+}
+
+// Debug uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Debug(args ...interface{}) {
+ s.log(DebugLevel, "", args, nil)
+}
+
+// Info uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Info(args ...interface{}) {
+ s.log(InfoLevel, "", args, nil)
+}
+
+// Warn uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Warn(args ...interface{}) {
+ s.log(WarnLevel, "", args, nil)
+}
+
+// Error uses fmt.Sprint to construct and log a message.
+func (s *SugaredLogger) Error(args ...interface{}) {
+ s.log(ErrorLevel, "", args, nil)
+}
+
+// DPanic uses fmt.Sprint to construct and log a message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanic(args ...interface{}) {
+ s.log(DPanicLevel, "", args, nil)
+}
+
+// Panic uses fmt.Sprint to construct and log a message, then panics.
+func (s *SugaredLogger) Panic(args ...interface{}) {
+ s.log(PanicLevel, "", args, nil)
+}
+
+// Fatal uses fmt.Sprint to construct and log a message, then calls os.Exit.
+func (s *SugaredLogger) Fatal(args ...interface{}) {
+ s.log(FatalLevel, "", args, nil)
+}
+
+// Debugf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
+ s.log(DebugLevel, template, args, nil)
+}
+
+// Infof uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Infof(template string, args ...interface{}) {
+ s.log(InfoLevel, template, args, nil)
+}
+
+// Warnf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Warnf(template string, args ...interface{}) {
+ s.log(WarnLevel, template, args, nil)
+}
+
+// Errorf uses fmt.Sprintf to log a templated message.
+func (s *SugaredLogger) Errorf(template string, args ...interface{}) {
+ s.log(ErrorLevel, template, args, nil)
+}
+
+// DPanicf uses fmt.Sprintf to log a templated message. In development, the
+// logger then panics. (See DPanicLevel for details.)
+func (s *SugaredLogger) DPanicf(template string, args ...interface{}) {
+ s.log(DPanicLevel, template, args, nil)
+}
+
+// Panicf uses fmt.Sprintf to log a templated message, then panics.
+func (s *SugaredLogger) Panicf(template string, args ...interface{}) {
+ s.log(PanicLevel, template, args, nil)
+}
+
+// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
+func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
+ s.log(FatalLevel, template, args, nil)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+//
+// When debug-level logging is disabled, this is much faster than
+// s.With(keysAndValues).Debug(msg)
+func (s *SugaredLogger) Debugw(msg string, keysAndValues ...interface{}) {
+ s.log(DebugLevel, msg, nil, keysAndValues)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Infow(msg string, keysAndValues ...interface{}) {
+ s.log(InfoLevel, msg, nil, keysAndValues)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Warnw(msg string, keysAndValues ...interface{}) {
+ s.log(WarnLevel, msg, nil, keysAndValues)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Errorw(msg string, keysAndValues ...interface{}) {
+ s.log(ErrorLevel, msg, nil, keysAndValues)
+}
+
+// DPanicw logs a message with some additional context. In development, the
+// logger then panics. (See DPanicLevel for details.) The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) DPanicw(msg string, keysAndValues ...interface{}) {
+ s.log(DPanicLevel, msg, nil, keysAndValues)
+}
+
+// Panicw logs a message with some additional context, then panics. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Panicw(msg string, keysAndValues ...interface{}) {
+ s.log(PanicLevel, msg, nil, keysAndValues)
+}
+
+// Fatalw logs a message with some additional context, then calls os.Exit. The
+// variadic key-value pairs are treated as they are in With.
+func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
+ s.log(FatalLevel, msg, nil, keysAndValues)
+}
+
+// Sync flushes any buffered log entries.
+func (s *SugaredLogger) Sync() error {
+ return s.base.Sync()
+}
+
+func (s *SugaredLogger) log(lvl zapcore.Level, template string, fmtArgs []interface{}, context []interface{}) {
+ // If logging at this level is completely disabled, skip the overhead of
+ // string formatting.
+ if lvl < DPanicLevel && !s.base.Core().Enabled(lvl) {
+ return
+ }
+
+ msg := getMessage(template, fmtArgs)
+ if ce := s.base.Check(lvl, msg); ce != nil {
+ ce.Write(s.sweetenFields(context)...)
+ }
+}
+
+// getMessage formats the message with Sprint, Sprintf, or neither.
+func getMessage(template string, fmtArgs []interface{}) string {
+ if len(fmtArgs) == 0 {
+ return template
+ }
+
+ if template != "" {
+ return fmt.Sprintf(template, fmtArgs...)
+ }
+
+ if len(fmtArgs) == 1 {
+ if str, ok := fmtArgs[0].(string); ok {
+ return str
+ }
+ }
+ return fmt.Sprint(fmtArgs...)
+}
+
+func (s *SugaredLogger) sweetenFields(args []interface{}) []Field {
+ if len(args) == 0 {
+ return nil
+ }
+
+ // Allocate enough space for the worst case; if users pass only structured
+ // fields, we shouldn't penalize them with extra allocations.
+ fields := make([]Field, 0, len(args))
+ var invalid invalidPairs
+
+ for i := 0; i < len(args); {
+ // This is a strongly-typed field. Consume it and move on.
+ if f, ok := args[i].(Field); ok {
+ fields = append(fields, f)
+ i++
+ continue
+ }
+
+ // Make sure this element isn't a dangling key.
+ if i == len(args)-1 {
+ s.base.Error(_oddNumberErrMsg, Any("ignored", args[i]))
+ break
+ }
+
+ // Consume this value and the next, treating them as a key-value pair. If the
+ // key isn't a string, add this pair to the slice of invalid pairs.
+ key, val := args[i], args[i+1]
+ if keyStr, ok := key.(string); !ok {
+ // Subsequent errors are likely, so allocate once up front.
+ if cap(invalid) == 0 {
+ invalid = make(invalidPairs, 0, len(args)/2)
+ }
+ invalid = append(invalid, invalidPair{i, key, val})
+ } else {
+ fields = append(fields, Any(keyStr, val))
+ }
+ i += 2
+ }
+
+ // If we encountered any invalid key-value pairs, log an error.
+ if len(invalid) > 0 {
+ s.base.Error(_nonStringKeyErrMsg, Array("invalid", invalid))
+ }
+ return fields
+}
+
+type invalidPair struct {
+ position int
+ key, value interface{}
+}
+
+func (p invalidPair) MarshalLogObject(enc zapcore.ObjectEncoder) error {
+ enc.AddInt64("position", int64(p.position))
+ Any("key", p.key).AddTo(enc)
+ Any("value", p.value).AddTo(enc)
+ return nil
+}
+
+type invalidPairs []invalidPair
+
+func (ps invalidPairs) MarshalLogArray(enc zapcore.ArrayEncoder) error {
+ var err error
+ for i := range ps {
+ err = multierr.Append(err, enc.AppendObject(ps[i]))
+ }
+ return err
+}
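
A short sketch of the three method families on one SugaredLogger (messages and keys are illustrative):

package main

import "go.uber.org/zap"

func main() {
	sugar := zap.NewExample().Sugar()
	defer sugar.Sync()

	sugar.Infow("user login", "user", "alice", "attempt", 3) // loosely-typed pairs
	sugar.Infof("retrying in %d seconds", 5)                 // printf-style
	sugar.Info("plain message")                              // print-style
}
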
diff --git a/vendor/go.uber.org/zap/time.go b/vendor/go.uber.org/zap/time.go
new file mode 100644
index 0000000..c5a1f16
--- /dev/null
+++ b/vendor/go.uber.org/zap/time.go
@@ -0,0 +1,27 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import "time"
+
+func timeToMillis(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond)
+}
diff --git a/vendor/go.uber.org/zap/writer.go b/vendor/go.uber.org/zap/writer.go
new file mode 100644
index 0000000..86a709a
--- /dev/null
+++ b/vendor/go.uber.org/zap/writer.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zap
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+
+ "go.uber.org/zap/zapcore"
+
+ "go.uber.org/multierr"
+)
+
+// Open is a high-level wrapper that takes a variadic number of URLs, opens or
+// creates each of the specified resources, and combines them into a locked
+// WriteSyncer. It also returns any error encountered and a function to close
+// any opened files.
+//
+// Passing no URLs returns a no-op WriteSyncer. Zap handles URLs without a
+// scheme and URLs with the "file" scheme. Third-party code may register
+// factories for other schemes using RegisterSink.
+//
+// URLs with the "file" scheme must use absolute paths on the local
+// filesystem. No user, password, port, fragments, or query parameters are
+// allowed, and the hostname must be empty or "localhost".
+//
+// Since it's common to write logs to the local filesystem, URLs without a
+// scheme (e.g., "/var/log/foo.log") are treated as local file paths. Without
+// a scheme, the special paths "stdout" and "stderr" are interpreted as
+// os.Stdout and os.Stderr. When specified without a scheme, relative file
+// paths also work.
+func Open(paths ...string) (zapcore.WriteSyncer, func(), error) {
+ writers, close, err := open(paths)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ writer := CombineWriteSyncers(writers...)
+ return writer, close, nil
+}
+
+func open(paths []string) ([]zapcore.WriteSyncer, func(), error) {
+ writers := make([]zapcore.WriteSyncer, 0, len(paths))
+ closers := make([]io.Closer, 0, len(paths))
+ close := func() {
+ for _, c := range closers {
+ c.Close()
+ }
+ }
+
+ var openErr error
+ for _, path := range paths {
+ sink, err := newSink(path)
+ if err != nil {
+ openErr = multierr.Append(openErr, fmt.Errorf("couldn't open sink %q: %v", path, err))
+ continue
+ }
+ writers = append(writers, sink)
+ closers = append(closers, sink)
+ }
+ if openErr != nil {
+ close()
+ return writers, nil, openErr
+ }
+
+ return writers, close, nil
+}
+
+// CombineWriteSyncers is a utility that combines multiple WriteSyncers into a
+// single, locked WriteSyncer. If no inputs are supplied, it returns a no-op
+// WriteSyncer.
+//
+// It's provided purely as a convenience; the result is no different from
+// using zapcore.NewMultiWriteSyncer and zapcore.Lock individually.
+func CombineWriteSyncers(writers ...zapcore.WriteSyncer) zapcore.WriteSyncer {
+ if len(writers) == 0 {
+ return zapcore.AddSync(ioutil.Discard)
+ }
+ return zapcore.Lock(zapcore.NewMultiWriteSyncer(writers...))
+}
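
A sketch of Open feeding a core; "/tmp/service.log" is an illustrative path, while "stderr" is one of the special scheme-less paths described above:

package main

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws, closeOut, err := zap.Open("stderr", "/tmp/service.log")
	if err != nil {
		panic(err)
	}
	defer closeOut()

	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel,
	)
	logger := zap.New(core)
	defer logger.Sync()

	logger.Info("written to both destinations")
}
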
diff --git a/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
new file mode 100644
index 0000000..0c1436f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/buffered_write_syncer.go
@@ -0,0 +1,188 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bufio"
+ "sync"
+ "time"
+
+ "go.uber.org/multierr"
+)
+
+const (
+ // _defaultBufferSize specifies the default size of the write buffer.
+ _defaultBufferSize = 256 * 1024 // 256 kB
+
+ // _defaultFlushInterval specifies the default flush interval for
+ // BufferedWriteSyncer.
+ _defaultFlushInterval = 30 * time.Second
+)
+
+// A BufferedWriteSyncer is a WriteSyncer that buffers writes in-memory before
+// flushing them to a wrapped WriteSyncer after reaching some limit, or at some
+// fixed interval--whichever comes first.
+//
+// BufferedWriteSyncer is safe for concurrent use. You don't need to use
+// zapcore.Lock for WriteSyncers with BufferedWriteSyncer.
+type BufferedWriteSyncer struct {
+ // WS is the WriteSyncer around which BufferedWriteSyncer will buffer
+ // writes.
+ //
+ // This field is required.
+ WS WriteSyncer
+
+ // Size specifies the maximum amount of data the writer will buffer
+ // before flushing.
+ //
+ // Defaults to 256 kB if unspecified.
+ Size int
+
+ // FlushInterval specifies how often the writer should flush data if
+ // there have been no writes.
+ //
+ // Defaults to 30 seconds if unspecified.
+ FlushInterval time.Duration
+
+ // Clock, if specified, provides control of the source of time for the
+ // writer.
+ //
+ // Defaults to the system clock.
+ Clock Clock
+
+ // unexported fields for state
+ mu sync.Mutex
+ initialized bool // whether initialize() has run
+ writer *bufio.Writer
+ ticker *time.Ticker
+ stop chan struct{} // closed when flushLoop should stop
+ stopped bool // whether Stop() has run
+ done chan struct{} // closed when flushLoop has stopped
+}
+
+func (s *BufferedWriteSyncer) initialize() {
+ size := s.Size
+ if size == 0 {
+ size = _defaultBufferSize
+ }
+
+ flushInterval := s.FlushInterval
+ if flushInterval == 0 {
+ flushInterval = _defaultFlushInterval
+ }
+
+ if s.Clock == nil {
+ s.Clock = DefaultClock
+ }
+
+ s.ticker = s.Clock.NewTicker(flushInterval)
+ s.writer = bufio.NewWriterSize(s.WS, size)
+ s.stop = make(chan struct{})
+ s.done = make(chan struct{})
+ s.initialized = true
+ go s.flushLoop()
+}
+
+// Write writes log data into the buffer. Multiple Write calls are batched, and
+// the data is flushed to the underlying WriteSyncer when the buffer fills or at
+// the configured interval, whichever comes first.
+func (s *BufferedWriteSyncer) Write(bs []byte) (int, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ s.initialize()
+ }
+
+ // To avoid partial writes from being flushed, we manually flush the existing buffer if:
+ // * The current write doesn't fit into the buffer fully, and
+ // * The buffer is not empty (since bufio will not split large writes when the buffer is empty)
+ if len(bs) > s.writer.Available() && s.writer.Buffered() > 0 {
+ if err := s.writer.Flush(); err != nil {
+ return 0, err
+ }
+ }
+
+ return s.writer.Write(bs)
+}
+
+// Sync flushes buffered log data into disk directly.
+func (s *BufferedWriteSyncer) Sync() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var err error
+ if s.initialized {
+ err = s.writer.Flush()
+ }
+
+ return multierr.Append(err, s.WS.Sync())
+}
+
+// flushLoop flushes the buffer at the configured interval until Stop is
+// called.
+func (s *BufferedWriteSyncer) flushLoop() {
+ defer close(s.done)
+
+ for {
+ select {
+ case <-s.ticker.C:
+ // Ignore the error here: the underlying bufio.Writer stores any write
+ // error and surfaces it on the next flush, so it is still reported
+ // through Sync when the syncer is stopped.
+ _ = s.Sync()
+ case <-s.stop:
+ return
+ }
+ }
+}
+
+// Stop closes the buffer, cleans up background goroutines, and flushes
+// remaining unwritten data.
+func (s *BufferedWriteSyncer) Stop() (err error) {
+ var stopped bool
+
+ // Critical section.
+ func() {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if !s.initialized {
+ return
+ }
+
+ stopped = s.stopped
+ if stopped {
+ return
+ }
+ s.stopped = true
+
+ s.ticker.Stop()
+ close(s.stop) // tell flushLoop to stop
+ <-s.done // and wait until it has
+ }()
+
+ // Don't call Sync on consecutive Stops.
+ if !stopped {
+ err = s.Sync()
+ }
+
+ return err
+}
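
A sketch wrapping stdout in a BufferedWriteSyncer; the size and interval are illustrative overrides of the 256 kB / 30 s defaults:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	ws := &zapcore.BufferedWriteSyncer{
		WS:            zapcore.AddSync(os.Stdout),
		Size:          512 * 1024,
		FlushInterval: 10 * time.Second,
	}
	defer ws.Stop() // flushes remaining data and stops the flush goroutine

	logger := zap.New(zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		ws,
		zap.InfoLevel,
	))
	logger.Info("buffered until a flush")
}
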
diff --git a/vendor/go.uber.org/zap/zapcore/clock.go b/vendor/go.uber.org/zap/zapcore/clock.go
new file mode 100644
index 0000000..d2ea95b
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/clock.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2021 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+)
+
+// DefaultClock is the default clock used by Zap in operations that require
+// time. This clock uses the system clock for all operations.
+var DefaultClock = systemClock{}
+
+// Clock is a source of time for logged entries.
+type Clock interface {
+ // Now returns the current local time.
+ Now() time.Time
+
+ // NewTicker returns a *time.Ticker whose channel delivers the
+ // "ticks" of the clock.
+ NewTicker(time.Duration) *time.Ticker
+}
+
+// systemClock implements default Clock that uses system time.
+type systemClock struct{}
+
+func (systemClock) Now() time.Time {
+ return time.Now()
+}
+
+func (systemClock) NewTicker(duration time.Duration) *time.Ticker {
+ return time.NewTicker(duration)
+}
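
Since Clock is a two-method interface, swapping in a deterministic source for tests is straightforward; fixedClock below is a hypothetical implementation:

package main

import (
	"os"
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// fixedClock always reports the same instant (test helper, hypothetical).
type fixedClock struct{ t time.Time }

func (c fixedClock) Now() time.Time { return c.t }

func (c fixedClock) NewTicker(d time.Duration) *time.Ticker {
	return time.NewTicker(d)
}

func main() {
	core := zapcore.NewCore(
		zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zap.InfoLevel,
	)
	logger := zap.New(core, zap.WithClock(fixedClock{t: time.Unix(0, 0).UTC()}))
	logger.Info("timestamped at the epoch")
}
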
diff --git a/vendor/go.uber.org/zap/zapcore/console_encoder.go b/vendor/go.uber.org/zap/zapcore/console_encoder.go
new file mode 100644
index 0000000..2307af4
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -0,0 +1,161 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "sync"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+var _sliceEncoderPool = sync.Pool{
+ New: func() interface{} {
+ return &sliceArrayEncoder{elems: make([]interface{}, 0, 2)}
+ },
+}
+
+func getSliceEncoder() *sliceArrayEncoder {
+ return _sliceEncoderPool.Get().(*sliceArrayEncoder)
+}
+
+func putSliceEncoder(e *sliceArrayEncoder) {
+ e.elems = e.elems[:0]
+ _sliceEncoderPool.Put(e)
+}
+
+type consoleEncoder struct {
+ *jsonEncoder
+}
+
+// NewConsoleEncoder creates an encoder whose output is designed for human,
+// rather than machine, consumption. It serializes the core log entry data
+// (message, level, timestamp, etc.) in a plain-text format and leaves the
+// structured context as JSON.
+//
+// Note that although the console encoder doesn't use the keys specified in the
+// encoder configuration, it will omit any element whose key is set to the empty
+// string.
+func NewConsoleEncoder(cfg EncoderConfig) Encoder {
+ if cfg.ConsoleSeparator == "" {
+ // Use a default delimiter of '\t' for backwards compatibility
+ cfg.ConsoleSeparator = "\t"
+ }
+ return consoleEncoder{newJSONEncoder(cfg, true)}
+}
+
+func (c consoleEncoder) Clone() Encoder {
+ return consoleEncoder{c.jsonEncoder.Clone().(*jsonEncoder)}
+}
+
+func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ line := bufferpool.Get()
+
+ // We don't want the entry's metadata to be quoted and escaped (if it's
+ // encoded as strings), which means that we can't use the JSON encoder. The
+ // simplest option is to use the memory encoder and fmt.Fprint.
+ //
+ // If this ever becomes a performance bottleneck, we can implement
+ // ArrayEncoder for our plain-text format.
+ arr := getSliceEncoder()
+ if c.TimeKey != "" && c.EncodeTime != nil {
+ c.EncodeTime(ent.Time, arr)
+ }
+ if c.LevelKey != "" && c.EncodeLevel != nil {
+ c.EncodeLevel(ent.Level, arr)
+ }
+ if ent.LoggerName != "" && c.NameKey != "" {
+ nameEncoder := c.EncodeName
+
+ if nameEncoder == nil {
+ // Fall back to FullNameEncoder for backward compatibility.
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, arr)
+ }
+ if ent.Caller.Defined {
+ if c.CallerKey != "" && c.EncodeCaller != nil {
+ c.EncodeCaller(ent.Caller, arr)
+ }
+ if c.FunctionKey != "" {
+ arr.AppendString(ent.Caller.Function)
+ }
+ }
+ for i := range arr.elems {
+ if i > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+ fmt.Fprint(line, arr.elems[i])
+ }
+ putSliceEncoder(arr)
+
+ // Add the message itself.
+ if c.MessageKey != "" {
+ c.addSeparatorIfNecessary(line)
+ line.AppendString(ent.Message)
+ }
+
+ // Add any structured context.
+ c.writeContext(line, fields)
+
+ // If there's no stacktrace key, honor that; this allows users to force
+ // single-line output.
+ if ent.Stack != "" && c.StacktraceKey != "" {
+ line.AppendByte('\n')
+ line.AppendString(ent.Stack)
+ }
+
+ if c.LineEnding != "" {
+ line.AppendString(c.LineEnding)
+ } else {
+ line.AppendString(DefaultLineEnding)
+ }
+ return line, nil
+}
+
+func (c consoleEncoder) writeContext(line *buffer.Buffer, extra []Field) {
+ context := c.jsonEncoder.Clone().(*jsonEncoder)
+ defer func() {
+ // putJSONEncoder assumes the buffer is still used, but we write out the buffer so
+ // we can free it.
+ context.buf.Free()
+ putJSONEncoder(context)
+ }()
+
+ addFields(context, extra)
+ context.closeOpenNamespaces()
+ if context.buf.Len() == 0 {
+ return
+ }
+
+ c.addSeparatorIfNecessary(line)
+ line.AppendByte('{')
+ line.Write(context.buf.Bytes())
+ line.AppendByte('}')
+}
+
+func (c consoleEncoder) addSeparatorIfNecessary(line *buffer.Buffer) {
+ if line.Len() > 0 {
+ line.AppendString(c.ConsoleSeparator)
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/core.go b/vendor/go.uber.org/zap/zapcore/core.go
new file mode 100644
index 0000000..a1ef8b0
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/core.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// Core is a minimal, fast logger interface. It's designed for library authors
+// to wrap in a more user-friendly API.
+type Core interface {
+ LevelEnabler
+
+ // With adds structured context to the Core.
+ With([]Field) Core
+ // Check determines whether the supplied Entry should be logged (using the
+ // embedded LevelEnabler and possibly some extra logic). If the entry
+ // should be logged, the Core adds itself to the CheckedEntry and returns
+ // the result.
+ //
+ // Callers must use Check before calling Write.
+ Check(Entry, *CheckedEntry) *CheckedEntry
+ // Write serializes the Entry and any Fields supplied at the log site and
+ // writes them to their destination.
+ //
+ // If called, Write should always log the Entry and Fields; it should not
+ // replicate the logic of Check.
+ Write(Entry, []Field) error
+ // Sync flushes buffered logs (if any).
+ Sync() error
+}
+
+type nopCore struct{}
+
+// NewNopCore returns a no-op Core.
+func NewNopCore() Core { return nopCore{} }
+func (nopCore) Enabled(Level) bool { return false }
+func (n nopCore) With([]Field) Core { return n }
+func (nopCore) Check(_ Entry, ce *CheckedEntry) *CheckedEntry { return ce }
+func (nopCore) Write(Entry, []Field) error { return nil }
+func (nopCore) Sync() error { return nil }
+
+// NewCore creates a Core that writes logs to a WriteSyncer.
+func NewCore(enc Encoder, ws WriteSyncer, enab LevelEnabler) Core {
+ return &ioCore{
+ LevelEnabler: enab,
+ enc: enc,
+ out: ws,
+ }
+}
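+
+// A typical construction sketch (illustrative; os.Stdout and the level are
+// assumptions). Level itself satisfies LevelEnabler, so InfoLevel enables
+// Info and above:
+//
+//	core := NewCore(
+//		NewJSONEncoder(EncoderConfig{MessageKey: "msg", LevelKey: "level", EncodeLevel: LowercaseLevelEncoder}),
+//		Lock(AddSync(os.Stdout)), // wrap the writer so concurrent use is safe
+//		InfoLevel,
+//	)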
+
+type ioCore struct {
+ LevelEnabler
+ enc Encoder
+ out WriteSyncer
+}
+
+func (c *ioCore) With(fields []Field) Core {
+ clone := c.clone()
+ addFields(clone.enc, fields)
+ return clone
+}
+
+func (c *ioCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if c.Enabled(ent.Level) {
+ return ce.AddCore(ent, c)
+ }
+ return ce
+}
+
+func (c *ioCore) Write(ent Entry, fields []Field) error {
+ buf, err := c.enc.EncodeEntry(ent, fields)
+ if err != nil {
+ return err
+ }
+ _, err = c.out.Write(buf.Bytes())
+ buf.Free()
+ if err != nil {
+ return err
+ }
+ if ent.Level > ErrorLevel {
+ // Since we may be crashing the program, sync the output. Ignore Sync
+ // errors, pending a clean solution to issue #370.
+ c.Sync()
+ }
+ return nil
+}
+
+func (c *ioCore) Sync() error {
+ return c.out.Sync()
+}
+
+func (c *ioCore) clone() *ioCore {
+ return &ioCore{
+ LevelEnabler: c.LevelEnabler,
+ enc: c.enc.Clone(),
+ out: c.out,
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/doc.go b/vendor/go.uber.org/zap/zapcore/doc.go
new file mode 100644
index 0000000..31000e9
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+// Package zapcore defines and implements the low-level interfaces upon which
+// zap is built. By providing alternate implementations of these interfaces,
+// external packages can extend zap's capabilities.
+package zapcore // import "go.uber.org/zap/zapcore"
diff --git a/vendor/go.uber.org/zap/zapcore/encoder.go b/vendor/go.uber.org/zap/zapcore/encoder.go
new file mode 100644
index 0000000..6601ca1
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -0,0 +1,443 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/json"
+ "time"
+
+ "go.uber.org/zap/buffer"
+)
+
+// DefaultLineEnding defines the default line ending when writing logs.
+// Alternate line endings specified in EncoderConfig can override this
+// behavior.
+const DefaultLineEnding = "\n"
+
+// OmitKey defines the key to use when callers want to remove a key from log output.
+const OmitKey = ""
+
+// A LevelEncoder serializes a Level to a primitive type.
+type LevelEncoder func(Level, PrimitiveArrayEncoder)
+
+// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
+// InfoLevel is serialized to "info".
+func LowercaseLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.String())
+}
+
+// LowercaseColorLevelEncoder serializes a Level to a lowercase string and adds coloring.
+// For example, InfoLevel is serialized to "info" and colored blue.
+func LowercaseColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToLowercaseColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.String())
+ }
+ enc.AppendString(s)
+}
+
+// CapitalLevelEncoder serializes a Level to an all-caps string. For example,
+// InfoLevel is serialized to "INFO".
+func CapitalLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ enc.AppendString(l.CapitalString())
+}
+
+// CapitalColorLevelEncoder serializes a Level to an all-caps string and adds color.
+// For example, InfoLevel is serialized to "INFO" and colored blue.
+func CapitalColorLevelEncoder(l Level, enc PrimitiveArrayEncoder) {
+ s, ok := _levelToCapitalColorString[l]
+ if !ok {
+ s = _unknownLevelColor.Add(l.CapitalString())
+ }
+ enc.AppendString(s)
+}
+
+// UnmarshalText unmarshals text to a LevelEncoder. "capital" is unmarshaled to
+// CapitalLevelEncoder, "capitalColor" is unmarshaled to CapitalColorLevelEncoder,
+// "color" is unmarshaled to LowercaseColorLevelEncoder, and anything else
+// is unmarshaled to LowercaseLevelEncoder.
+func (e *LevelEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "capital":
+ *e = CapitalLevelEncoder
+ case "capitalColor":
+ *e = CapitalColorLevelEncoder
+ case "color":
+ *e = LowercaseColorLevelEncoder
+ default:
+ *e = LowercaseLevelEncoder
+ }
+ return nil
+}
+
+// A TimeEncoder serializes a time.Time to a primitive type.
+type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
+
+// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
+// since the Unix epoch.
+func EpochTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ sec := float64(nanos) / float64(time.Second)
+ enc.AppendFloat64(sec)
+}
+
+// EpochMillisTimeEncoder serializes a time.Time to a floating-point number of
+// milliseconds since the Unix epoch.
+func EpochMillisTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ nanos := t.UnixNano()
+ millis := float64(nanos) / float64(time.Millisecond)
+ enc.AppendFloat64(millis)
+}
+
+// EpochNanosTimeEncoder serializes a time.Time to an integer number of
+// nanoseconds since the Unix epoch.
+func EpochNanosTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(t.UnixNano())
+}
+
+func encodeTimeLayout(t time.Time, layout string, enc PrimitiveArrayEncoder) {
+ type appendTimeEncoder interface {
+ AppendTimeLayout(time.Time, string)
+ }
+
+ if enc, ok := enc.(appendTimeEncoder); ok {
+ enc.AppendTimeLayout(t, layout)
+ return
+ }
+
+ enc.AppendString(t.Format(layout))
+}
+
+// ISO8601TimeEncoder serializes a time.Time to an ISO8601-formatted string
+// with millisecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func ISO8601TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, "2006-01-02T15:04:05.000Z0700", enc)
+}
+
+// RFC3339TimeEncoder serializes a time.Time to an RFC3339-formatted string.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339TimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339, enc)
+}
+
+// RFC3339NanoTimeEncoder serializes a time.Time to an RFC3339-formatted string
+// with nanosecond precision.
+//
+// If enc supports AppendTimeLayout(t time.Time, layout string), it's used
+// instead of appending a pre-formatted string value.
+func RFC3339NanoTimeEncoder(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, time.RFC3339Nano, enc)
+}
+
+// TimeEncoderOfLayout returns a TimeEncoder which serializes a time.Time using
+// the given layout.
+func TimeEncoderOfLayout(layout string) TimeEncoder {
+ return func(t time.Time, enc PrimitiveArrayEncoder) {
+ encodeTimeLayout(t, layout, enc)
+ }
+}
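+
+// For example (a sketch; the layout is an arbitrary reference-time string):
+//
+//	cfg.EncodeTime = TimeEncoderOfLayout("2006-01-02 15:04:05")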
+
+// UnmarshalText unmarshals text to a TimeEncoder.
+// "rfc3339nano" and "RFC3339Nano" are unmarshaled to RFC3339NanoTimeEncoder.
+// "rfc3339" and "RFC3339" are unmarshaled to RFC3339TimeEncoder.
+// "iso8601" and "ISO8601" are unmarshaled to ISO8601TimeEncoder.
+// "millis" is unmarshaled to EpochMillisTimeEncoder.
+// "nanos" is unmarshaled to EpochNanosEncoder.
+// Anything else is unmarshaled to EpochTimeEncoder.
+func (e *TimeEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "rfc3339nano", "RFC3339Nano":
+ *e = RFC3339NanoTimeEncoder
+ case "rfc3339", "RFC3339":
+ *e = RFC3339TimeEncoder
+ case "iso8601", "ISO8601":
+ *e = ISO8601TimeEncoder
+ case "millis":
+ *e = EpochMillisTimeEncoder
+ case "nanos":
+ *e = EpochNanosTimeEncoder
+ default:
+ *e = EpochTimeEncoder
+ }
+ return nil
+}
+
+// UnmarshalYAML unmarshals YAML to a TimeEncoder.
+// If the value is an object with a "layout" field, it is unmarshaled to a
+// TimeEncoder with the given layout:
+//	timeEncoder:
+//	  layout: 06/01/02 03:04pm
+// If the value is a string, it uses UnmarshalText:
+//	timeEncoder: iso8601
+func (e *TimeEncoder) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var o struct {
+ Layout string `json:"layout" yaml:"layout"`
+ }
+ if err := unmarshal(&o); err == nil {
+ *e = TimeEncoderOfLayout(o.Layout)
+ return nil
+ }
+
+ var s string
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+ return e.UnmarshalText([]byte(s))
+}
+
+// UnmarshalJSON unmarshals JSON to a TimeEncoder in the same way UnmarshalYAML does.
+func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
+ return e.UnmarshalYAML(func(v interface{}) error {
+ return json.Unmarshal(data, v)
+ })
+}
+
+// A DurationEncoder serializes a time.Duration to a primitive type.
+type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
+
+// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
+func SecondsDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendFloat64(float64(d) / float64(time.Second))
+}
+
+// NanosDurationEncoder serializes a time.Duration to an integer number of
+// nanoseconds elapsed.
+func NanosDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(int64(d))
+}
+
+// MillisDurationEncoder serializes a time.Duration to an integer number of
+// milliseconds elapsed.
+func MillisDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendInt64(d.Nanoseconds() / 1e6)
+}
+
+// StringDurationEncoder serializes a time.Duration using its built-in String
+// method.
+func StringDurationEncoder(d time.Duration, enc PrimitiveArrayEncoder) {
+ enc.AppendString(d.String())
+}
+
+// UnmarshalText unmarshals text to a DurationEncoder. "string" is unmarshaled
+// to StringDurationEncoder, "nanos" to NanosDurationEncoder, "ms" to
+// MillisDurationEncoder, and anything else to SecondsDurationEncoder.
+func (e *DurationEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "string":
+ *e = StringDurationEncoder
+ case "nanos":
+ *e = NanosDurationEncoder
+ case "ms":
+ *e = MillisDurationEncoder
+ default:
+ *e = SecondsDurationEncoder
+ }
+ return nil
+}
+
+// A CallerEncoder serializes an EntryCaller to a primitive type.
+type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
+
+// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
+// format.
+func FullCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.String())
+}
+
+// ShortCallerEncoder serializes a caller in package/file:line format, trimming
+// all but the final directory from the full path.
+func ShortCallerEncoder(caller EntryCaller, enc PrimitiveArrayEncoder) {
+ // TODO: consider using a byte-oriented API to save an allocation.
+ enc.AppendString(caller.TrimmedPath())
+}
+
+// UnmarshalText unmarshals text to a CallerEncoder. "full" is unmarshaled to
+// FullCallerEncoder and anything else is unmarshaled to ShortCallerEncoder.
+func (e *CallerEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullCallerEncoder
+ default:
+ *e = ShortCallerEncoder
+ }
+ return nil
+}
+
+// A NameEncoder serializes a period-separated logger name to a primitive
+// type.
+type NameEncoder func(string, PrimitiveArrayEncoder)
+
+// FullNameEncoder serializes the logger name as-is.
+func FullNameEncoder(loggerName string, enc PrimitiveArrayEncoder) {
+ enc.AppendString(loggerName)
+}
+
+// UnmarshalText unmarshals text to a NameEncoder. Currently, everything is
+// unmarshaled to FullNameEncoder.
+func (e *NameEncoder) UnmarshalText(text []byte) error {
+ switch string(text) {
+ case "full":
+ *e = FullNameEncoder
+ default:
+ *e = FullNameEncoder
+ }
+ return nil
+}
+
+// An EncoderConfig allows users to configure the concrete encoders supplied by
+// zapcore.
+type EncoderConfig struct {
+ // Set the keys used for each log entry. If any key is empty, that portion
+ // of the entry is omitted.
+ MessageKey string `json:"messageKey" yaml:"messageKey"`
+ LevelKey string `json:"levelKey" yaml:"levelKey"`
+ TimeKey string `json:"timeKey" yaml:"timeKey"`
+ NameKey string `json:"nameKey" yaml:"nameKey"`
+ CallerKey string `json:"callerKey" yaml:"callerKey"`
+ FunctionKey string `json:"functionKey" yaml:"functionKey"`
+ StacktraceKey string `json:"stacktraceKey" yaml:"stacktraceKey"`
+ LineEnding string `json:"lineEnding" yaml:"lineEnding"`
+ // Configure the primitive representations of common complex types. For
+ // example, some users may want all time.Times serialized as floating-point
+ // seconds since epoch, while others may prefer ISO8601 strings.
+ EncodeLevel LevelEncoder `json:"levelEncoder" yaml:"levelEncoder"`
+ EncodeTime TimeEncoder `json:"timeEncoder" yaml:"timeEncoder"`
+ EncodeDuration DurationEncoder `json:"durationEncoder" yaml:"durationEncoder"`
+ EncodeCaller CallerEncoder `json:"callerEncoder" yaml:"callerEncoder"`
+ // Unlike the other primitive type encoders, EncodeName is optional. The
+ // zero value falls back to FullNameEncoder.
+ EncodeName NameEncoder `json:"nameEncoder" yaml:"nameEncoder"`
+ // Configures the field separator used by the console encoder. Defaults
+ // to tab.
+ ConsoleSeparator string `json:"consoleSeparator" yaml:"consoleSeparator"`
+}
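+
+// A fully populated config might look like this (a sketch; every key and
+// encoder below is a choice, not a required default):
+//
+//	cfg := EncoderConfig{
+//		MessageKey:     "msg",
+//		LevelKey:       "level",
+//		TimeKey:        "ts",
+//		NameKey:        "logger",
+//		CallerKey:      "caller",
+//		StacktraceKey:  "stacktrace",
+//		LineEnding:     DefaultLineEnding,
+//		EncodeLevel:    LowercaseLevelEncoder,
+//		EncodeTime:     EpochTimeEncoder,
+//		EncodeDuration: SecondsDurationEncoder,
+//		EncodeCaller:   ShortCallerEncoder,
+//	}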
+
+// ObjectEncoder is a strongly-typed, encoding-agnostic interface for adding a
+// map- or struct-like object to the logging context. Like maps, ObjectEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ObjectEncoder interface {
+ // Logging-specific marshalers.
+ AddArray(key string, marshaler ArrayMarshaler) error
+ AddObject(key string, marshaler ObjectMarshaler) error
+
+ // Built-in types.
+ AddBinary(key string, value []byte) // for arbitrary bytes
+ AddByteString(key string, value []byte) // for UTF-8 encoded bytes
+ AddBool(key string, value bool)
+ AddComplex128(key string, value complex128)
+ AddComplex64(key string, value complex64)
+ AddDuration(key string, value time.Duration)
+ AddFloat64(key string, value float64)
+ AddFloat32(key string, value float32)
+ AddInt(key string, value int)
+ AddInt64(key string, value int64)
+ AddInt32(key string, value int32)
+ AddInt16(key string, value int16)
+ AddInt8(key string, value int8)
+ AddString(key, value string)
+ AddTime(key string, value time.Time)
+ AddUint(key string, value uint)
+ AddUint64(key string, value uint64)
+ AddUint32(key string, value uint32)
+ AddUint16(key string, value uint16)
+ AddUint8(key string, value uint8)
+ AddUintptr(key string, value uintptr)
+
+ // AddReflected uses reflection to serialize arbitrary objects, so it can be
+ // slow and allocation-heavy.
+ AddReflected(key string, value interface{}) error
+ // OpenNamespace opens an isolated namespace where all subsequent fields will
+ // be added. Applications can use namespaces to prevent key collisions when
+ // injecting loggers into sub-components or third-party libraries.
+ OpenNamespace(key string)
+}
+
+// ArrayEncoder is a strongly-typed, encoding-agnostic interface for adding
+// array-like objects to the logging context. Of note, it supports mixed-type
+// arrays even though they aren't typical in Go. Like slices, ArrayEncoders
+// aren't safe for concurrent use (though typical use shouldn't require locks).
+type ArrayEncoder interface {
+ // Built-in types.
+ PrimitiveArrayEncoder
+
+ // Time-related types.
+ AppendDuration(time.Duration)
+ AppendTime(time.Time)
+
+ // Logging-specific marshalers.
+ AppendArray(ArrayMarshaler) error
+ AppendObject(ObjectMarshaler) error
+
+ // AppendReflected uses reflection to serialize arbitrary objects, so it's
+ // slow and allocation-heavy.
+ AppendReflected(value interface{}) error
+}
+
+// PrimitiveArrayEncoder is the subset of the ArrayEncoder interface that deals
+// only in Go's built-in types. It's included only so that Duration- and
+// TimeEncoders cannot trigger infinite recursion.
+type PrimitiveArrayEncoder interface {
+ // Built-in types.
+ AppendBool(bool)
+ AppendByteString([]byte) // for UTF-8 encoded bytes
+ AppendComplex128(complex128)
+ AppendComplex64(complex64)
+ AppendFloat64(float64)
+ AppendFloat32(float32)
+ AppendInt(int)
+ AppendInt64(int64)
+ AppendInt32(int32)
+ AppendInt16(int16)
+ AppendInt8(int8)
+ AppendString(string)
+ AppendUint(uint)
+ AppendUint64(uint64)
+ AppendUint32(uint32)
+ AppendUint16(uint16)
+ AppendUint8(uint8)
+ AppendUintptr(uintptr)
+}
+
+// Encoder is a format-agnostic interface for all log entry marshalers. Since
+// log encoders don't need to support the same wide range of use cases as
+// general-purpose marshalers, it's possible to make them faster and
+// lower-allocation.
+//
+// Implementations of the ObjectEncoder interface's methods can, of course,
+// freely modify the receiver. However, the Clone and EncodeEntry methods will
+// be called concurrently and shouldn't modify the receiver.
+type Encoder interface {
+ ObjectEncoder
+
+ // Clone copies the encoder, ensuring that adding fields to the copy doesn't
+ // affect the original.
+ Clone() Encoder
+
+ // EncodeEntry encodes an entry and fields, along with any accumulated
+ // context, into a byte buffer and returns it. Any fields that are empty,
+ // including fields on the `Entry` type, should be omitted.
+ EncodeEntry(Entry, []Field) (*buffer.Buffer, error)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/entry.go b/vendor/go.uber.org/zap/zapcore/entry.go
new file mode 100644
index 0000000..2d815fe
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/entry.go
@@ -0,0 +1,264 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "go.uber.org/zap/internal/bufferpool"
+ "go.uber.org/zap/internal/exit"
+
+ "go.uber.org/multierr"
+)
+
+var (
+ _cePool = sync.Pool{New: func() interface{} {
+ // Pre-allocate some space for cores.
+ return &CheckedEntry{
+ cores: make([]Core, 4),
+ }
+ }}
+)
+
+func getCheckedEntry() *CheckedEntry {
+ ce := _cePool.Get().(*CheckedEntry)
+ ce.reset()
+ return ce
+}
+
+func putCheckedEntry(ce *CheckedEntry) {
+ if ce == nil {
+ return
+ }
+ _cePool.Put(ce)
+}
+
+// NewEntryCaller makes an EntryCaller from the return signature of
+// runtime.Caller.
+func NewEntryCaller(pc uintptr, file string, line int, ok bool) EntryCaller {
+ if !ok {
+ return EntryCaller{}
+ }
+ return EntryCaller{
+ PC: pc,
+ File: file,
+ Line: line,
+ Defined: true,
+ }
+}
+
+// EntryCaller represents the caller of a logging function.
+type EntryCaller struct {
+ Defined bool
+ PC uintptr
+ File string
+ Line int
+ Function string
+}
+
+// String returns the full path and line number of the caller.
+func (ec EntryCaller) String() string {
+ return ec.FullPath()
+}
+
+// FullPath returns a /full/path/to/package/file:line description of the
+// caller.
+func (ec EntryCaller) FullPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ buf := bufferpool.Get()
+ buf.AppendString(ec.File)
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
+
+// TrimmedPath returns a package/file:line description of the caller,
+// preserving only the leaf directory name and file name.
+func (ec EntryCaller) TrimmedPath() string {
+ if !ec.Defined {
+ return "undefined"
+ }
+ // nb. To make sure we trim the path correctly on Windows too, we
+ // counter-intuitively need to use '/' and *not* os.PathSeparator here,
+ // because the path given originates from Go stdlib, specifically
+ // runtime.Caller() which (as of Mar/17) returns forward slashes even on
+ // Windows.
+ //
+ // See https://github.com/golang/go/issues/3335
+ // and https://github.com/golang/go/issues/18151
+ //
+ // for discussion on the issue on Go side.
+ //
+ // Find the last separator.
+ //
+ idx := strings.LastIndexByte(ec.File, '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ // Find the penultimate separator.
+ idx = strings.LastIndexByte(ec.File[:idx], '/')
+ if idx == -1 {
+ return ec.FullPath()
+ }
+ buf := bufferpool.Get()
+ // Keep everything after the penultimate separator.
+ buf.AppendString(ec.File[idx+1:])
+ buf.AppendByte(':')
+ buf.AppendInt(int64(ec.Line))
+ caller := buf.String()
+ buf.Free()
+ return caller
+}
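+
+// For example (hypothetical path), TrimmedPath turns
+//
+//	/home/user/go/src/example.com/project/pkg/file.go:42
+//
+// into
+//
+//	pkg/file.go:42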
+
+// An Entry represents a complete log message. The entry's structured context
+// is already serialized, but the log level, time, message, and call site
+// information are available for inspection and modification. Any fields left
+// empty will be omitted when encoding.
+//
+// Entries are pooled, so any functions that accept them MUST be careful not to
+// retain references to them.
+type Entry struct {
+ Level Level
+ Time time.Time
+ LoggerName string
+ Message string
+ Caller EntryCaller
+ Stack string
+}
+
+// CheckWriteAction indicates what action to take after a log entry is
+// processed. Actions are ordered in increasing severity.
+type CheckWriteAction uint8
+
+const (
+ // WriteThenNoop indicates that nothing special needs to be done. It's the
+ // default behavior.
+ WriteThenNoop CheckWriteAction = iota
+ // WriteThenGoexit runs runtime.Goexit after Write.
+ WriteThenGoexit
+ // WriteThenPanic causes a panic after Write.
+ WriteThenPanic
+ // WriteThenFatal causes a fatal os.Exit after Write.
+ WriteThenFatal
+)
+
+// CheckedEntry is an Entry together with a collection of Cores that have
+// already agreed to log it.
+//
+// CheckedEntry references should be created by calling AddCore or Should on a
+// nil *CheckedEntry. References are returned to a pool after Write, and MUST
+// NOT be retained after calling their Write method.
+type CheckedEntry struct {
+ Entry
+ ErrorOutput WriteSyncer
+ dirty bool // best-effort detection of pool misuse
+ should CheckWriteAction
+ cores []Core
+}
+
+func (ce *CheckedEntry) reset() {
+ ce.Entry = Entry{}
+ ce.ErrorOutput = nil
+ ce.dirty = false
+ ce.should = WriteThenNoop
+ for i := range ce.cores {
+ // don't keep references to cores
+ ce.cores[i] = nil
+ }
+ ce.cores = ce.cores[:0]
+}
+
+// Write writes the entry to the stored Cores, returns any errors, and returns
+// the CheckedEntry reference to a pool for immediate re-use. Finally, it
+// executes any required CheckWriteAction.
+func (ce *CheckedEntry) Write(fields ...Field) {
+ if ce == nil {
+ return
+ }
+
+ if ce.dirty {
+ if ce.ErrorOutput != nil {
+ // Make a best effort to detect unsafe re-use of this CheckedEntry.
+ // If the entry is dirty, log an internal error; because the
+ // CheckedEntry is being used after it was returned to the pool,
+ // the message may be an amalgamation from multiple call sites.
+ fmt.Fprintf(ce.ErrorOutput, "%v Unsafe CheckedEntry re-use near Entry %+v.\n", ce.Time, ce.Entry)
+ ce.ErrorOutput.Sync()
+ }
+ return
+ }
+ ce.dirty = true
+
+ var err error
+ for i := range ce.cores {
+ err = multierr.Append(err, ce.cores[i].Write(ce.Entry, fields))
+ }
+ if ce.ErrorOutput != nil {
+ if err != nil {
+ fmt.Fprintf(ce.ErrorOutput, "%v write error: %v\n", ce.Time, err)
+ ce.ErrorOutput.Sync()
+ }
+ }
+
+ should, msg := ce.should, ce.Message
+ putCheckedEntry(ce)
+
+ switch should {
+ case WriteThenPanic:
+ panic(msg)
+ case WriteThenFatal:
+ exit.Exit()
+ case WriteThenGoexit:
+ runtime.Goexit()
+ }
+}
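+
+// The intended calling pattern is to go through Check and Write the entry
+// only when some Core accepted it (a sketch; core, ent, and fields are
+// assumed to be in scope):
+//
+//	if ce := core.Check(ent, nil); ce != nil {
+//		ce.Write(fields...)
+//	}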
+
+// AddCore adds a Core that has agreed to log this CheckedEntry. It's intended to be
+// used by Core.Check implementations, and is safe to call on nil CheckedEntry
+// references.
+func (ce *CheckedEntry) AddCore(ent Entry, core Core) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.cores = append(ce.cores, core)
+ return ce
+}
+
+// Should sets this CheckedEntry's CheckWriteAction, which controls whether a
+// Core will panic or fatal after writing this log entry. Like AddCore, it's
+// safe to call on nil CheckedEntry references.
+func (ce *CheckedEntry) Should(ent Entry, should CheckWriteAction) *CheckedEntry {
+ if ce == nil {
+ ce = getCheckedEntry()
+ ce.Entry = ent
+ }
+ ce.should = should
+ return ce
+}
diff --git a/vendor/go.uber.org/zap/zapcore/error.go b/vendor/go.uber.org/zap/zapcore/error.go
new file mode 100644
index 0000000..f2a07d7
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/error.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "fmt"
+ "reflect"
+ "sync"
+)
+
+// Encodes the given error into fields of an object. A field with the given
+// name is added for the error message.
+//
+// If the error implements fmt.Formatter, a field with the name ${key}Verbose
+// is also added with the full verbose error message.
+//
+// Finally, if the error implements errorGroup (from go.uber.org/multierr) or
+// causer (from github.com/pkg/errors), a ${key}Causes field is added with an
+// array of objects containing the errors it is composed of.
+//
+// {
+// "error": err.Error(),
+// "errorVerbose": fmt.Sprintf("%+v", err),
+// "errorCauses": [
+// ...
+// ],
+// }
+func encodeError(key string, err error, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the Error() method
+ defer func() {
+ if rerr := recover(); rerr != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are an
+ // error that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "<nil>")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", rerr)
+ }
+ }()
+
+ basic := err.Error()
+ enc.AddString(key, basic)
+
+ switch e := err.(type) {
+ case errorGroup:
+ return enc.AddArray(key+"Causes", errArray(e.Errors()))
+ case fmt.Formatter:
+ verbose := fmt.Sprintf("%+v", e)
+ if verbose != basic {
+ // This is a rich error type, like those produced by
+ // github.com/pkg/errors.
+ enc.AddString(key+"Verbose", verbose)
+ }
+ }
+ return nil
+}
+
+type errorGroup interface {
+ // Provides read-only access to the underlying list of errors, preferably
+ // without causing any allocs.
+ Errors() []error
+}
+
+// Note that errArray and errArrayElem are very similar to the versions
+// implemented in the top-level error.go file. We can't re-use those here
+// because that would require exporting errArray as part of the zapcore API.
+
+// Encodes a list of errors using the standard error encoding logic.
+type errArray []error
+
+func (errs errArray) MarshalLogArray(arr ArrayEncoder) error {
+ for i := range errs {
+ if errs[i] == nil {
+ continue
+ }
+
+ el := newErrArrayElem(errs[i])
+ arr.AppendObject(el)
+ el.Free()
+ }
+ return nil
+}
+
+var _errArrayElemPool = sync.Pool{New: func() interface{} {
+ return &errArrayElem{}
+}}
+
+// Encodes any error into a {"error": ...} object, re-using the same
+// error-encoding logic.
+//
+// May be passed in place of an array to build a single-element array.
+type errArrayElem struct{ err error }
+
+func newErrArrayElem(err error) *errArrayElem {
+ e := _errArrayElemPool.Get().(*errArrayElem)
+ e.err = err
+ return e
+}
+
+func (e *errArrayElem) MarshalLogArray(arr ArrayEncoder) error {
+ return arr.AppendObject(e)
+}
+
+func (e *errArrayElem) MarshalLogObject(enc ObjectEncoder) error {
+ return encodeError("error", e.err, enc)
+}
+
+func (e *errArrayElem) Free() {
+ e.err = nil
+ _errArrayElemPool.Put(e)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/field.go b/vendor/go.uber.org/zap/zapcore/field.go
new file mode 100644
index 0000000..95bdb0a
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/field.go
@@ -0,0 +1,233 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+)
+
+// A FieldType indicates which member of the Field union struct should be used
+// and how it should be serialized.
+type FieldType uint8
+
+const (
+ // UnknownType is the default field type. Attempting to add it to an encoder will panic.
+ UnknownType FieldType = iota
+ // ArrayMarshalerType indicates that the field carries an ArrayMarshaler.
+ ArrayMarshalerType
+ // ObjectMarshalerType indicates that the field carries an ObjectMarshaler.
+ ObjectMarshalerType
+ // BinaryType indicates that the field carries an opaque binary blob.
+ BinaryType
+ // BoolType indicates that the field carries a bool.
+ BoolType
+ // ByteStringType indicates that the field carries UTF-8 encoded bytes.
+ ByteStringType
+ // Complex128Type indicates that the field carries a complex128.
+ Complex128Type
+ // Complex64Type indicates that the field carries a complex64.
+ Complex64Type
+ // DurationType indicates that the field carries a time.Duration.
+ DurationType
+ // Float64Type indicates that the field carries a float64.
+ Float64Type
+ // Float32Type indicates that the field carries a float32.
+ Float32Type
+ // Int64Type indicates that the field carries an int64.
+ Int64Type
+ // Int32Type indicates that the field carries an int32.
+ Int32Type
+ // Int16Type indicates that the field carries an int16.
+ Int16Type
+ // Int8Type indicates that the field carries an int8.
+ Int8Type
+ // StringType indicates that the field carries a string.
+ StringType
+ // TimeType indicates that the field carries a time.Time that is
+ // representable by a UnixNano() stored as an int64.
+ TimeType
+ // TimeFullType indicates that the field carries a time.Time stored as-is.
+ TimeFullType
+ // Uint64Type indicates that the field carries a uint64.
+ Uint64Type
+ // Uint32Type indicates that the field carries a uint32.
+ Uint32Type
+ // Uint16Type indicates that the field carries a uint16.
+ Uint16Type
+ // Uint8Type indicates that the field carries a uint8.
+ Uint8Type
+ // UintptrType indicates that the field carries a uintptr.
+ UintptrType
+ // ReflectType indicates that the field carries an interface{}, which should
+ // be serialized using reflection.
+ ReflectType
+ // NamespaceType signals the beginning of an isolated namespace. All
+ // subsequent fields should be added to the new namespace.
+ NamespaceType
+ // StringerType indicates that the field carries a fmt.Stringer.
+ StringerType
+ // ErrorType indicates that the field carries an error.
+ ErrorType
+ // SkipType indicates that the field is a no-op.
+ SkipType
+
+ // InlineMarshalerType indicates that the field carries an ObjectMarshaler
+ // that should be inlined.
+ InlineMarshalerType
+)
+
+// A Field is a marshaling operation used to add a key-value pair to a logger's
+// context. Most fields are lazily marshaled, so it's inexpensive to add fields
+// to disabled debug-level log statements.
+type Field struct {
+ Key string
+ Type FieldType
+ Integer int64
+ String string
+ Interface interface{}
+}
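+
+// For instance, a string field uses only the Key, Type, and String members
+// (a sketch of the internal representation; enc is any ObjectEncoder):
+//
+//	f := Field{Key: "user", Type: StringType, String: "alice"}
+//	f.AddTo(enc) // equivalent to enc.AddString("user", "alice")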
+
+// AddTo exports a field through the ObjectEncoder interface. It's primarily
+// useful to library authors, and shouldn't be necessary in most applications.
+func (f Field) AddTo(enc ObjectEncoder) {
+ var err error
+
+ switch f.Type {
+ case ArrayMarshalerType:
+ err = enc.AddArray(f.Key, f.Interface.(ArrayMarshaler))
+ case ObjectMarshalerType:
+ err = enc.AddObject(f.Key, f.Interface.(ObjectMarshaler))
+ case InlineMarshalerType:
+ err = f.Interface.(ObjectMarshaler).MarshalLogObject(enc)
+ case BinaryType:
+ enc.AddBinary(f.Key, f.Interface.([]byte))
+ case BoolType:
+ enc.AddBool(f.Key, f.Integer == 1)
+ case ByteStringType:
+ enc.AddByteString(f.Key, f.Interface.([]byte))
+ case Complex128Type:
+ enc.AddComplex128(f.Key, f.Interface.(complex128))
+ case Complex64Type:
+ enc.AddComplex64(f.Key, f.Interface.(complex64))
+ case DurationType:
+ enc.AddDuration(f.Key, time.Duration(f.Integer))
+ case Float64Type:
+ enc.AddFloat64(f.Key, math.Float64frombits(uint64(f.Integer)))
+ case Float32Type:
+ enc.AddFloat32(f.Key, math.Float32frombits(uint32(f.Integer)))
+ case Int64Type:
+ enc.AddInt64(f.Key, f.Integer)
+ case Int32Type:
+ enc.AddInt32(f.Key, int32(f.Integer))
+ case Int16Type:
+ enc.AddInt16(f.Key, int16(f.Integer))
+ case Int8Type:
+ enc.AddInt8(f.Key, int8(f.Integer))
+ case StringType:
+ enc.AddString(f.Key, f.String)
+ case TimeType:
+ if f.Interface != nil {
+ enc.AddTime(f.Key, time.Unix(0, f.Integer).In(f.Interface.(*time.Location)))
+ } else {
+ // Fall back to UTC if location is nil.
+ enc.AddTime(f.Key, time.Unix(0, f.Integer))
+ }
+ case TimeFullType:
+ enc.AddTime(f.Key, f.Interface.(time.Time))
+ case Uint64Type:
+ enc.AddUint64(f.Key, uint64(f.Integer))
+ case Uint32Type:
+ enc.AddUint32(f.Key, uint32(f.Integer))
+ case Uint16Type:
+ enc.AddUint16(f.Key, uint16(f.Integer))
+ case Uint8Type:
+ enc.AddUint8(f.Key, uint8(f.Integer))
+ case UintptrType:
+ enc.AddUintptr(f.Key, uintptr(f.Integer))
+ case ReflectType:
+ err = enc.AddReflected(f.Key, f.Interface)
+ case NamespaceType:
+ enc.OpenNamespace(f.Key)
+ case StringerType:
+ err = encodeStringer(f.Key, f.Interface, enc)
+ case ErrorType:
+ err = encodeError(f.Key, f.Interface.(error), enc)
+ case SkipType:
+ break
+ default:
+ panic(fmt.Sprintf("unknown field type: %v", f))
+ }
+
+ if err != nil {
+ enc.AddString(fmt.Sprintf("%sError", f.Key), err.Error())
+ }
+}
+
+// Equals returns whether two fields are equal. For non-primitive types such as
+// errors, marshalers, or reflect types, it uses reflect.DeepEqual.
+func (f Field) Equals(other Field) bool {
+ if f.Type != other.Type {
+ return false
+ }
+ if f.Key != other.Key {
+ return false
+ }
+
+ switch f.Type {
+ case BinaryType, ByteStringType:
+ return bytes.Equal(f.Interface.([]byte), other.Interface.([]byte))
+ case ArrayMarshalerType, ObjectMarshalerType, ErrorType, ReflectType:
+ return reflect.DeepEqual(f.Interface, other.Interface)
+ default:
+ return f == other
+ }
+}
+
+func addFields(enc ObjectEncoder, fields []Field) {
+ for i := range fields {
+ fields[i].AddTo(enc)
+ }
+}
+
+func encodeStringer(key string, stringer interface{}, enc ObjectEncoder) (retErr error) {
+ // Try to capture panics (from nil references or otherwise) when calling
+ // the String() method, similar to https://golang.org/src/fmt/print.go#L540
+ defer func() {
+ if err := recover(); err != nil {
+ // If it's a nil pointer, just say "<nil>". The likeliest causes are a
+ // Stringer that fails to guard against nil or a nil pointer for a
+ // value receiver, and in either case, "<nil>" is a nice result.
+ if v := reflect.ValueOf(stringer); v.Kind() == reflect.Ptr && v.IsNil() {
+ enc.AddString(key, "<nil>")
+ return
+ }
+
+ retErr = fmt.Errorf("PANIC=%v", err)
+ }
+ }()
+
+ enc.AddString(key, stringer.(fmt.Stringer).String())
+ return nil
+}
diff --git a/vendor/go.uber.org/zap/zapcore/hook.go b/vendor/go.uber.org/zap/zapcore/hook.go
new file mode 100644
index 0000000..5db4afb
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/hook.go
@@ -0,0 +1,68 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type hooked struct {
+ Core
+ funcs []func(Entry) error
+}
+
+// RegisterHooks wraps a Core and runs a collection of user-defined callback
+// hooks each time a message is logged. Execution of the callbacks is blocking.
+//
+// This offers users an easy way to register simple callbacks (e.g., metrics
+// collection) without implementing the full Core interface.
+func RegisterHooks(core Core, hooks ...func(Entry) error) Core {
+ funcs := append([]func(Entry) error{}, hooks...)
+ return &hooked{
+ Core: core,
+ funcs: funcs,
+ }
+}
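+
+// For example, counting error-level entries (a sketch; errorCount is a
+// hypothetical counter, not part of this package):
+//
+//	hooked := RegisterHooks(core, func(ent Entry) error {
+//		if ent.Level >= ErrorLevel {
+//			errorCount.Inc()
+//		}
+//		return nil
+//	})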
+
+func (h *hooked) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ // Let the wrapped Core decide whether to log this message or not. This
+ // also gives the downstream a chance to register itself directly with the
+ // CheckedEntry.
+ if downstream := h.Core.Check(ent, ce); downstream != nil {
+ return downstream.AddCore(ent, h)
+ }
+ return ce
+}
+
+func (h *hooked) With(fields []Field) Core {
+ return &hooked{
+ Core: h.Core.With(fields),
+ funcs: h.funcs,
+ }
+}
+
+func (h *hooked) Write(ent Entry, _ []Field) error {
+ // Since our downstream had a chance to register itself directly with the
+ // CheckedEntry, we don't need to call it here.
+ var err error
+ for i := range h.funcs {
+ err = multierr.Append(err, h.funcs[i](ent))
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/increase_level.go b/vendor/go.uber.org/zap/zapcore/increase_level.go
new file mode 100644
index 0000000..5a17492
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/increase_level.go
@@ -0,0 +1,66 @@
+// Copyright (c) 2020 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "fmt"
+
+type levelFilterCore struct {
+ core Core
+ level LevelEnabler
+}
+
+// NewIncreaseLevelCore creates a core that can be used to increase the level of
+// an existing Core. It cannot be used to decrease the logging level, as it acts
+// as a filter before calling the underlying core. If the given level would
+// decrease the logging level, an error is returned.
+func NewIncreaseLevelCore(core Core, level LevelEnabler) (Core, error) {
+ for l := _maxLevel; l >= _minLevel; l-- {
+ if !core.Enabled(l) && level.Enabled(l) {
+ return nil, fmt.Errorf("invalid increase level, as level %q is allowed by increased level, but not by existing core", l)
+ }
+ }
+
+ return &levelFilterCore{core, level}, nil
+}
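+
+// For example, raising a debug-level core so it only logs warnings and above
+// (a sketch; debugCore is assumed to be in scope):
+//
+//	warnCore, err := NewIncreaseLevelCore(debugCore, WarnLevel)
+//	// err is non-nil only when the new level would *enable* something the
+//	// wrapped core rejects, i.e. when attempting to decrease the level.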
+
+func (c *levelFilterCore) Enabled(lvl Level) bool {
+ return c.level.Enabled(lvl)
+}
+
+func (c *levelFilterCore) With(fields []Field) Core {
+ return &levelFilterCore{c.core.With(fields), c.level}
+}
+
+func (c *levelFilterCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !c.Enabled(ent.Level) {
+ return ce
+ }
+
+ return c.core.Check(ent, ce)
+}
+
+func (c *levelFilterCore) Write(ent Entry, fields []Field) error {
+ return c.core.Write(ent, fields)
+}
+
+func (c *levelFilterCore) Sync() error {
+ return c.core.Sync()
+}
diff --git a/vendor/go.uber.org/zap/zapcore/json_encoder.go b/vendor/go.uber.org/zap/zapcore/json_encoder.go
new file mode 100644
index 0000000..5cf7d91
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -0,0 +1,534 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "math"
+ "sync"
+ "time"
+ "unicode/utf8"
+
+ "go.uber.org/zap/buffer"
+ "go.uber.org/zap/internal/bufferpool"
+)
+
+// For JSON-escaping; see jsonEncoder.safeAddString below.
+const _hex = "0123456789abcdef"
+
+var _jsonPool = sync.Pool{New: func() interface{} {
+ return &jsonEncoder{}
+}}
+
+func getJSONEncoder() *jsonEncoder {
+ return _jsonPool.Get().(*jsonEncoder)
+}
+
+func putJSONEncoder(enc *jsonEncoder) {
+ if enc.reflectBuf != nil {
+ enc.reflectBuf.Free()
+ }
+ enc.EncoderConfig = nil
+ enc.buf = nil
+ enc.spaced = false
+ enc.openNamespaces = 0
+ enc.reflectBuf = nil
+ enc.reflectEnc = nil
+ _jsonPool.Put(enc)
+}
+
+type jsonEncoder struct {
+ *EncoderConfig
+ buf *buffer.Buffer
+ spaced bool // include spaces after colons and commas
+ openNamespaces int
+
+ // for encoding generic values by reflection
+ reflectBuf *buffer.Buffer
+ reflectEnc *json.Encoder
+}
+
+// NewJSONEncoder creates a fast, low-allocation JSON encoder. The encoder
+// appropriately escapes all field keys and values.
+//
+// Note that the encoder doesn't deduplicate keys, so it's possible to produce
+// a message like
+// {"foo":"bar","foo":"baz"}
+// This is permitted by the JSON specification, but not encouraged. Many
+// libraries will ignore duplicate key-value pairs (typically keeping the last
+// pair) when unmarshaling, but users should attempt to avoid adding duplicate
+// keys.
+func NewJSONEncoder(cfg EncoderConfig) Encoder {
+ return newJSONEncoder(cfg, false)
+}
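+
+// A sketch of what the encoder produces (keys come from the config; the
+// field literal uses the internal Field representation):
+//
+//	enc := NewJSONEncoder(EncoderConfig{MessageKey: "msg", LevelKey: "level", EncodeLevel: LowercaseLevelEncoder})
+//	buf, _ := enc.EncodeEntry(Entry{Level: InfoLevel, Message: "hello"}, []Field{{Key: "n", Type: Int64Type, Integer: 1}})
+//	// buf.String() is e.g. {"level":"info","msg":"hello","n":1} plus a newline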
+
+func newJSONEncoder(cfg EncoderConfig, spaced bool) *jsonEncoder {
+ return &jsonEncoder{
+ EncoderConfig: &cfg,
+ buf: bufferpool.Get(),
+ spaced: spaced,
+ }
+}
+
+func (enc *jsonEncoder) AddArray(key string, arr ArrayMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendArray(arr)
+}
+
+func (enc *jsonEncoder) AddObject(key string, obj ObjectMarshaler) error {
+ enc.addKey(key)
+ return enc.AppendObject(obj)
+}
+
+func (enc *jsonEncoder) AddBinary(key string, val []byte) {
+ enc.AddString(key, base64.StdEncoding.EncodeToString(val))
+}
+
+func (enc *jsonEncoder) AddByteString(key string, val []byte) {
+ enc.addKey(key)
+ enc.AppendByteString(val)
+}
+
+func (enc *jsonEncoder) AddBool(key string, val bool) {
+ enc.addKey(key)
+ enc.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AddComplex128(key string, val complex128) {
+ enc.addKey(key)
+ enc.AppendComplex128(val)
+}
+
+func (enc *jsonEncoder) AddDuration(key string, val time.Duration) {
+ enc.addKey(key)
+ enc.AppendDuration(val)
+}
+
+func (enc *jsonEncoder) AddFloat64(key string, val float64) {
+ enc.addKey(key)
+ enc.AppendFloat64(val)
+}
+
+func (enc *jsonEncoder) AddInt64(key string, val int64) {
+ enc.addKey(key)
+ enc.AppendInt64(val)
+}
+
+func (enc *jsonEncoder) resetReflectBuf() {
+ if enc.reflectBuf == nil {
+ enc.reflectBuf = bufferpool.Get()
+ enc.reflectEnc = json.NewEncoder(enc.reflectBuf)
+
+ // For consistency with our custom JSON encoder.
+ enc.reflectEnc.SetEscapeHTML(false)
+ } else {
+ enc.reflectBuf.Reset()
+ }
+}
+
+var nullLiteralBytes = []byte("null")
+
+// Only invoke the standard JSON encoder if there is actually something to
+// encode; otherwise write the JSON null literal directly.
+func (enc *jsonEncoder) encodeReflected(obj interface{}) ([]byte, error) {
+ if obj == nil {
+ return nullLiteralBytes, nil
+ }
+ enc.resetReflectBuf()
+ if err := enc.reflectEnc.Encode(obj); err != nil {
+ return nil, err
+ }
+ enc.reflectBuf.TrimNewline()
+ return enc.reflectBuf.Bytes(), nil
+}
+
+func (enc *jsonEncoder) AddReflected(key string, obj interface{}) error {
+ valueBytes, err := enc.encodeReflected(obj)
+ if err != nil {
+ return err
+ }
+ enc.addKey(key)
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) OpenNamespace(key string) {
+ enc.addKey(key)
+ enc.buf.AppendByte('{')
+ enc.openNamespaces++
+}
+
+func (enc *jsonEncoder) AddString(key, val string) {
+ enc.addKey(key)
+ enc.AppendString(val)
+}
+
+func (enc *jsonEncoder) AddTime(key string, val time.Time) {
+ enc.addKey(key)
+ enc.AppendTime(val)
+}
+
+func (enc *jsonEncoder) AddUint64(key string, val uint64) {
+ enc.addKey(key)
+ enc.AppendUint64(val)
+}
+
+func (enc *jsonEncoder) AppendArray(arr ArrayMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('[')
+ err := arr.MarshalLogArray(enc)
+ enc.buf.AppendByte(']')
+ return err
+}
+
+func (enc *jsonEncoder) AppendObject(obj ObjectMarshaler) error {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('{')
+ err := obj.MarshalLogObject(enc)
+ enc.buf.AppendByte('}')
+ return err
+}
+
+func (enc *jsonEncoder) AppendBool(val bool) {
+ enc.addElementSeparator()
+ enc.buf.AppendBool(val)
+}
+
+func (enc *jsonEncoder) AppendByteString(val []byte) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddByteString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendComplex128(val complex128) {
+ enc.addElementSeparator()
+ // Cast to a platform-independent, fixed-size type.
+ r, i := float64(real(val)), float64(imag(val))
+ enc.buf.AppendByte('"')
+ // Because we're always in a quoted string, we can use strconv without
+ // special-casing NaN and +/-Inf.
+ enc.buf.AppendFloat(r, 64)
+ enc.buf.AppendByte('+')
+ enc.buf.AppendFloat(i, 64)
+ enc.buf.AppendByte('i')
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendDuration(val time.Duration) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeDuration; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeDuration is a no-op. Fall back to nanoseconds to keep
+ // JSON valid.
+ enc.AppendInt64(int64(val))
+ }
+}
+
+func (enc *jsonEncoder) AppendInt64(val int64) {
+ enc.addElementSeparator()
+ enc.buf.AppendInt(val)
+}
+
+func (enc *jsonEncoder) AppendReflected(val interface{}) error {
+ valueBytes, err := enc.encodeReflected(val)
+ if err != nil {
+ return err
+ }
+ enc.addElementSeparator()
+ _, err = enc.buf.Write(valueBytes)
+ return err
+}
+
+func (enc *jsonEncoder) AppendString(val string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(val)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTimeLayout(time time.Time, layout string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.buf.AppendTime(time, layout)
+ enc.buf.AppendByte('"')
+}
+
+func (enc *jsonEncoder) AppendTime(val time.Time) {
+ cur := enc.buf.Len()
+ if e := enc.EncodeTime; e != nil {
+ e(val, enc)
+ }
+ if cur == enc.buf.Len() {
+ // User-supplied EncodeTime is a no-op. Fall back to nanos since epoch to keep
+ // output JSON valid.
+ enc.AppendInt64(val.UnixNano())
+ }
+}
+
+func (enc *jsonEncoder) AppendUint64(val uint64) {
+ enc.addElementSeparator()
+ enc.buf.AppendUint(val)
+}
+
+func (enc *jsonEncoder) AddComplex64(k string, v complex64) { enc.AddComplex128(k, complex128(v)) }
+func (enc *jsonEncoder) AddFloat32(k string, v float32) { enc.AddFloat64(k, float64(v)) }
+func (enc *jsonEncoder) AddInt(k string, v int) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt32(k string, v int32) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt16(k string, v int16) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddInt8(k string, v int8) { enc.AddInt64(k, int64(v)) }
+func (enc *jsonEncoder) AddUint(k string, v uint) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint32(k string, v uint32) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint16(k string, v uint16) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUint8(k string, v uint8) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AddUintptr(k string, v uintptr) { enc.AddUint64(k, uint64(v)) }
+func (enc *jsonEncoder) AppendComplex64(v complex64) { enc.AppendComplex128(complex128(v)) }
+func (enc *jsonEncoder) AppendFloat64(v float64) { enc.appendFloat(v, 64) }
+func (enc *jsonEncoder) AppendFloat32(v float32) { enc.appendFloat(float64(v), 32) }
+func (enc *jsonEncoder) AppendInt(v int) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt32(v int32) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt16(v int16) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendInt8(v int8) { enc.AppendInt64(int64(v)) }
+func (enc *jsonEncoder) AppendUint(v uint) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint32(v uint32) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint16(v uint16) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUint8(v uint8) { enc.AppendUint64(uint64(v)) }
+func (enc *jsonEncoder) AppendUintptr(v uintptr) { enc.AppendUint64(uint64(v)) }
+
+func (enc *jsonEncoder) Clone() Encoder {
+ clone := enc.clone()
+ clone.buf.Write(enc.buf.Bytes())
+ return clone
+}
+
+func (enc *jsonEncoder) clone() *jsonEncoder {
+ clone := getJSONEncoder()
+ clone.EncoderConfig = enc.EncoderConfig
+ clone.spaced = enc.spaced
+ clone.openNamespaces = enc.openNamespaces
+ clone.buf = bufferpool.Get()
+ return clone
+}
+
+func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer, error) {
+ final := enc.clone()
+ final.buf.AppendByte('{')
+
+ if final.LevelKey != "" {
+ final.addKey(final.LevelKey)
+ cur := final.buf.Len()
+ final.EncodeLevel(ent.Level, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeLevel was a no-op. Fall back to strings to keep
+ // output JSON valid.
+ final.AppendString(ent.Level.String())
+ }
+ }
+ if final.TimeKey != "" {
+ final.AddTime(final.TimeKey, ent.Time)
+ }
+ if ent.LoggerName != "" && final.NameKey != "" {
+ final.addKey(final.NameKey)
+ cur := final.buf.Len()
+ nameEncoder := final.EncodeName
+
+ // if no name encoder provided, fall back to FullNameEncoder for backwards
+ // compatibility
+ if nameEncoder == nil {
+ nameEncoder = FullNameEncoder
+ }
+
+ nameEncoder(ent.LoggerName, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeName was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.LoggerName)
+ }
+ }
+ if ent.Caller.Defined {
+ if final.CallerKey != "" {
+ final.addKey(final.CallerKey)
+ cur := final.buf.Len()
+ final.EncodeCaller(ent.Caller, final)
+ if cur == final.buf.Len() {
+ // User-supplied EncodeCaller was a no-op. Fall back to strings to
+ // keep output JSON valid.
+ final.AppendString(ent.Caller.String())
+ }
+ }
+ if final.FunctionKey != "" {
+ final.addKey(final.FunctionKey)
+ final.AppendString(ent.Caller.Function)
+ }
+ }
+ if final.MessageKey != "" {
+		final.addKey(final.MessageKey)
+ final.AppendString(ent.Message)
+ }
+ if enc.buf.Len() > 0 {
+ final.addElementSeparator()
+ final.buf.Write(enc.buf.Bytes())
+ }
+ addFields(final, fields)
+ final.closeOpenNamespaces()
+ if ent.Stack != "" && final.StacktraceKey != "" {
+ final.AddString(final.StacktraceKey, ent.Stack)
+ }
+ final.buf.AppendByte('}')
+ if final.LineEnding != "" {
+ final.buf.AppendString(final.LineEnding)
+ } else {
+ final.buf.AppendString(DefaultLineEnding)
+ }
+
+ ret := final.buf
+ putJSONEncoder(final)
+ return ret, nil
+}
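+
+// Illustrative sketch (assuming common production keys such as "level", "ts",
+// and "msg"): for an Info entry with message "hello" and one extra field,
+// EncodeEntry emits a single line in the shape of
+//
+//	{"level":"info","ts":1625000000,"msg":"hello","count":1}
+//
+// terminated by the configured line ending.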
+
+func (enc *jsonEncoder) truncate() {
+ enc.buf.Reset()
+}
+
+func (enc *jsonEncoder) closeOpenNamespaces() {
+ for i := 0; i < enc.openNamespaces; i++ {
+ enc.buf.AppendByte('}')
+ }
+}
+
+func (enc *jsonEncoder) addKey(key string) {
+ enc.addElementSeparator()
+ enc.buf.AppendByte('"')
+ enc.safeAddString(key)
+ enc.buf.AppendByte('"')
+ enc.buf.AppendByte(':')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+}
+
+func (enc *jsonEncoder) addElementSeparator() {
+ last := enc.buf.Len() - 1
+ if last < 0 {
+ return
+ }
+ switch enc.buf.Bytes()[last] {
+ case '{', '[', ':', ',', ' ':
+ return
+ default:
+ enc.buf.AppendByte(',')
+ if enc.spaced {
+ enc.buf.AppendByte(' ')
+ }
+ }
+}
+
+func (enc *jsonEncoder) appendFloat(val float64, bitSize int) {
+ enc.addElementSeparator()
+ switch {
+ case math.IsNaN(val):
+ enc.buf.AppendString(`"NaN"`)
+ case math.IsInf(val, 1):
+ enc.buf.AppendString(`"+Inf"`)
+ case math.IsInf(val, -1):
+ enc.buf.AppendString(`"-Inf"`)
+ default:
+ enc.buf.AppendFloat(val, bitSize)
+ }
+}
+
+// safeAddString JSON-escapes a string and appends it to the internal buffer.
+// Unlike the standard library's encoder, it doesn't attempt to protect the
+// user from browser vulnerabilities or JSONP-related problems.
+func (enc *jsonEncoder) safeAddString(s string) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRuneInString(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.AppendString(s[i : i+size])
+ i += size
+ }
+}
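+
+// Illustrative sketch: given the input "a\"b\n\x01\xff", safeAddString
+// appends a\"b\n\u0001\ufffd. Quotes, newlines, and other control bytes are
+// escaped, invalid UTF-8 becomes \ufffd, and printable ASCII plus valid
+// multi-byte UTF-8 pass through unchanged.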
+
+// safeAddByteString is a no-alloc equivalent of safeAddString(string(s)) for s []byte.
+func (enc *jsonEncoder) safeAddByteString(s []byte) {
+ for i := 0; i < len(s); {
+ if enc.tryAddRuneSelf(s[i]) {
+ i++
+ continue
+ }
+ r, size := utf8.DecodeRune(s[i:])
+ if enc.tryAddRuneError(r, size) {
+ i++
+ continue
+ }
+ enc.buf.Write(s[i : i+size])
+ i += size
+ }
+}
+
+// tryAddRuneSelf appends b if it is a valid UTF-8 character represented in a single byte.
+func (enc *jsonEncoder) tryAddRuneSelf(b byte) bool {
+ if b >= utf8.RuneSelf {
+ return false
+ }
+ if 0x20 <= b && b != '\\' && b != '"' {
+ enc.buf.AppendByte(b)
+ return true
+ }
+ switch b {
+ case '\\', '"':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte(b)
+ case '\n':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('n')
+ case '\r':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('r')
+ case '\t':
+ enc.buf.AppendByte('\\')
+ enc.buf.AppendByte('t')
+ default:
+ // Encode bytes < 0x20, except for the escape sequences above.
+ enc.buf.AppendString(`\u00`)
+ enc.buf.AppendByte(_hex[b>>4])
+ enc.buf.AppendByte(_hex[b&0xF])
+ }
+ return true
+}
+
+func (enc *jsonEncoder) tryAddRuneError(r rune, size int) bool {
+ if r == utf8.RuneError && size == 1 {
+ enc.buf.AppendString(`\ufffd`)
+ return true
+ }
+ return false
+}
diff --git a/vendor/go.uber.org/zap/zapcore/level.go b/vendor/go.uber.org/zap/zapcore/level.go
new file mode 100644
index 0000000..e575c9f
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+var errUnmarshalNilLevel = errors.New("can't unmarshal a nil *Level")
+
+// A Level is a logging priority. Higher levels are more important.
+type Level int8
+
+const (
+ // DebugLevel logs are typically voluminous, and are usually disabled in
+ // production.
+ DebugLevel Level = iota - 1
+ // InfoLevel is the default logging priority.
+ InfoLevel
+ // WarnLevel logs are more important than Info, but don't need individual
+ // human review.
+ WarnLevel
+ // ErrorLevel logs are high-priority. If an application is running smoothly,
+ // it shouldn't generate any error-level logs.
+ ErrorLevel
+ // DPanicLevel logs are particularly important errors. In development the
+ // logger panics after writing the message.
+ DPanicLevel
+ // PanicLevel logs a message, then panics.
+ PanicLevel
+ // FatalLevel logs a message, then calls os.Exit(1).
+ FatalLevel
+
+ _minLevel = DebugLevel
+ _maxLevel = FatalLevel
+)
+
+// String returns a lower-case ASCII representation of the log level.
+func (l Level) String() string {
+ switch l {
+ case DebugLevel:
+ return "debug"
+ case InfoLevel:
+ return "info"
+ case WarnLevel:
+ return "warn"
+ case ErrorLevel:
+ return "error"
+ case DPanicLevel:
+ return "dpanic"
+ case PanicLevel:
+ return "panic"
+ case FatalLevel:
+ return "fatal"
+ default:
+ return fmt.Sprintf("Level(%d)", l)
+ }
+}
+
+// CapitalString returns an all-caps ASCII representation of the log level.
+func (l Level) CapitalString() string {
+ // Printing levels in all-caps is common enough that we should export this
+ // functionality.
+ switch l {
+ case DebugLevel:
+ return "DEBUG"
+ case InfoLevel:
+ return "INFO"
+ case WarnLevel:
+ return "WARN"
+ case ErrorLevel:
+ return "ERROR"
+ case DPanicLevel:
+ return "DPANIC"
+ case PanicLevel:
+ return "PANIC"
+ case FatalLevel:
+ return "FATAL"
+ default:
+ return fmt.Sprintf("LEVEL(%d)", l)
+ }
+}
+
+// MarshalText marshals the Level to text. Note that the text representation
+// drops the -Level suffix (see example).
+func (l Level) MarshalText() ([]byte, error) {
+ return []byte(l.String()), nil
+}
+
+// UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText
+// expects the text representation of a Level to drop the -Level suffix (see
+// example).
+//
+// In particular, this makes it easy to configure logging levels using YAML,
+// TOML, or JSON files.
+func (l *Level) UnmarshalText(text []byte) error {
+ if l == nil {
+ return errUnmarshalNilLevel
+ }
+ if !l.unmarshalText(text) && !l.unmarshalText(bytes.ToLower(text)) {
+ return fmt.Errorf("unrecognized level: %q", text)
+ }
+ return nil
+}
+
+func (l *Level) unmarshalText(text []byte) bool {
+ switch string(text) {
+ case "debug", "DEBUG":
+ *l = DebugLevel
+ case "info", "INFO", "": // make the zero value useful
+ *l = InfoLevel
+ case "warn", "WARN":
+ *l = WarnLevel
+ case "error", "ERROR":
+ *l = ErrorLevel
+ case "dpanic", "DPANIC":
+ *l = DPanicLevel
+ case "panic", "PANIC":
+ *l = PanicLevel
+ case "fatal", "FATAL":
+ *l = FatalLevel
+ default:
+ return false
+ }
+ return true
+}
+
+// Set sets the level for the flag.Value interface.
+func (l *Level) Set(s string) error {
+ return l.UnmarshalText([]byte(s))
+}
+
+// Get gets the level for the flag.Getter interface.
+func (l *Level) Get() interface{} {
+ return *l
+}
+
+// Enabled returns true if the given level is at or above this level.
+func (l Level) Enabled(lvl Level) bool {
+ return lvl >= l
+}
+
+// LevelEnabler decides whether a given logging level is enabled when logging a
+// message.
+//
+// Enablers are intended to be used to implement deterministic filters;
+// concerns like sampling are better implemented as a Core.
+//
+// Each concrete Level value implements a static LevelEnabler which returns
+// true for itself and all higher logging levels. For example, WarnLevel.Enabled()
+// will return true for WarnLevel, ErrorLevel, DPanicLevel, PanicLevel, and
+// FatalLevel, but return false for InfoLevel and DebugLevel.
+type LevelEnabler interface {
+ Enabled(Level) bool
+}
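+
+// Illustrative sketch (using only the exported API above): *Level satisfies
+// flag.Value via Set and String, so levels can be parsed from flags or
+// config strings:
+//
+//	var lvl Level
+//	if err := lvl.Set("warn"); err != nil {
+//		// unrecognized strings such as "loud" return an error
+//	}
+//	_ = lvl.Enabled(ErrorLevel) // true: Error is at or above Warn
+//	_ = lvl.Enabled(DebugLevel) // false: Debug is below Warn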
diff --git a/vendor/go.uber.org/zap/zapcore/level_strings.go b/vendor/go.uber.org/zap/zapcore/level_strings.go
new file mode 100644
index 0000000..7af8dad
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/level_strings.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/zap/internal/color"
+
+var (
+ _levelToColor = map[Level]color.Color{
+ DebugLevel: color.Magenta,
+ InfoLevel: color.Blue,
+ WarnLevel: color.Yellow,
+ ErrorLevel: color.Red,
+ DPanicLevel: color.Red,
+ PanicLevel: color.Red,
+ FatalLevel: color.Red,
+ }
+ _unknownLevelColor = color.Red
+
+ _levelToLowercaseColorString = make(map[Level]string, len(_levelToColor))
+ _levelToCapitalColorString = make(map[Level]string, len(_levelToColor))
+)
+
+func init() {
+ for level, color := range _levelToColor {
+ _levelToLowercaseColorString[level] = color.Add(level.String())
+ _levelToCapitalColorString[level] = color.Add(level.CapitalString())
+ }
+}
diff --git a/vendor/go.uber.org/zap/zapcore/marshaler.go b/vendor/go.uber.org/zap/zapcore/marshaler.go
new file mode 100644
index 0000000..c3c55ba
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/marshaler.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+// ObjectMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ObjectMarshaler is only used when zap.Object is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ObjectMarshaler interface {
+ MarshalLogObject(ObjectEncoder) error
+}
+
+// ObjectMarshalerFunc is a type adapter that turns a function into an
+// ObjectMarshaler.
+type ObjectMarshalerFunc func(ObjectEncoder) error
+
+// MarshalLogObject calls the underlying function.
+func (f ObjectMarshalerFunc) MarshalLogObject(enc ObjectEncoder) error {
+ return f(enc)
+}
+
+// ArrayMarshaler allows user-defined types to efficiently add themselves to the
+// logging context, and to selectively omit information which shouldn't be
+// included in logs (e.g., passwords).
+//
+// Note: ArrayMarshaler is only used when zap.Array is used or when
+// passed directly to zap.Any. It is not used when reflection-based
+// encoding is used.
+type ArrayMarshaler interface {
+ MarshalLogArray(ArrayEncoder) error
+}
+
+// ArrayMarshalerFunc is a type adapter that turns a function into an
+// ArrayMarshaler.
+type ArrayMarshalerFunc func(ArrayEncoder) error
+
+// MarshalLogArray calls the underlying function.
+func (f ArrayMarshalerFunc) MarshalLogArray(enc ArrayEncoder) error {
+ return f(enc)
+}
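+
+// Illustrative sketch (using only the types above): ObjectMarshalerFunc lets
+// a closure stand in for an ObjectMarshaler without declaring a named type:
+//
+//	user := ObjectMarshalerFunc(func(enc ObjectEncoder) error {
+//		enc.AddString("name", "jane")
+//		enc.AddInt("age", 30)
+//		return nil
+//	})
+//
+// user can then be passed anywhere an ObjectMarshaler is accepted, for
+// example zap.Object("user", user).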
diff --git a/vendor/go.uber.org/zap/zapcore/memory_encoder.go b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
new file mode 100644
index 0000000..dfead08
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/memory_encoder.go
@@ -0,0 +1,179 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "time"
+
+// MapObjectEncoder is an ObjectEncoder backed by a simple
+// map[string]interface{}. It's not fast enough for production use, but it's
+// helpful in tests.
+type MapObjectEncoder struct {
+ // Fields contains the entire encoded log context.
+ Fields map[string]interface{}
+ // cur is a pointer to the namespace we're currently writing to.
+ cur map[string]interface{}
+}
+
+// NewMapObjectEncoder creates a new map-backed ObjectEncoder.
+func NewMapObjectEncoder() *MapObjectEncoder {
+ m := make(map[string]interface{})
+ return &MapObjectEncoder{
+ Fields: m,
+ cur: m,
+ }
+}
+
+// AddArray implements ObjectEncoder.
+func (m *MapObjectEncoder) AddArray(key string, v ArrayMarshaler) error {
+ arr := &sliceArrayEncoder{elems: make([]interface{}, 0)}
+ err := v.MarshalLogArray(arr)
+ m.cur[key] = arr.elems
+ return err
+}
+
+// AddObject implements ObjectEncoder.
+func (m *MapObjectEncoder) AddObject(k string, v ObjectMarshaler) error {
+ newMap := NewMapObjectEncoder()
+ m.cur[k] = newMap.Fields
+ return v.MarshalLogObject(newMap)
+}
+
+// AddBinary implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBinary(k string, v []byte) { m.cur[k] = v }
+
+// AddByteString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddByteString(k string, v []byte) { m.cur[k] = string(v) }
+
+// AddBool implements ObjectEncoder.
+func (m *MapObjectEncoder) AddBool(k string, v bool) { m.cur[k] = v }
+
+// AddDuration implements ObjectEncoder.
+func (m *MapObjectEncoder) AddDuration(k string, v time.Duration) { m.cur[k] = v }
+
+// AddComplex128 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex128(k string, v complex128) { m.cur[k] = v }
+
+// AddComplex64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddComplex64(k string, v complex64) { m.cur[k] = v }
+
+// AddFloat64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat64(k string, v float64) { m.cur[k] = v }
+
+// AddFloat32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddFloat32(k string, v float32) { m.cur[k] = v }
+
+// AddInt implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt(k string, v int) { m.cur[k] = v }
+
+// AddInt64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt64(k string, v int64) { m.cur[k] = v }
+
+// AddInt32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt32(k string, v int32) { m.cur[k] = v }
+
+// AddInt16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt16(k string, v int16) { m.cur[k] = v }
+
+// AddInt8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddInt8(k string, v int8) { m.cur[k] = v }
+
+// AddString implements ObjectEncoder.
+func (m *MapObjectEncoder) AddString(k string, v string) { m.cur[k] = v }
+
+// AddTime implements ObjectEncoder.
+func (m *MapObjectEncoder) AddTime(k string, v time.Time) { m.cur[k] = v }
+
+// AddUint implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint(k string, v uint) { m.cur[k] = v }
+
+// AddUint64 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint64(k string, v uint64) { m.cur[k] = v }
+
+// AddUint32 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint32(k string, v uint32) { m.cur[k] = v }
+
+// AddUint16 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint16(k string, v uint16) { m.cur[k] = v }
+
+// AddUint8 implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUint8(k string, v uint8) { m.cur[k] = v }
+
+// AddUintptr implements ObjectEncoder.
+func (m *MapObjectEncoder) AddUintptr(k string, v uintptr) { m.cur[k] = v }
+
+// AddReflected implements ObjectEncoder.
+func (m *MapObjectEncoder) AddReflected(k string, v interface{}) error {
+ m.cur[k] = v
+ return nil
+}
+
+// OpenNamespace implements ObjectEncoder.
+func (m *MapObjectEncoder) OpenNamespace(k string) {
+ ns := make(map[string]interface{})
+ m.cur[k] = ns
+ m.cur = ns
+}
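+
+// Illustrative sketch (test-style usage of the API above): OpenNamespace
+// redirects subsequent writes into a nested map that stays visible through
+// Fields:
+//
+//	enc := NewMapObjectEncoder()
+//	enc.AddString("k", "v")
+//	enc.OpenNamespace("inner")
+//	enc.AddInt("n", 1)
+//	// enc.Fields == map[string]interface{}{
+//	//	"k": "v",
+//	//	"inner": map[string]interface{}{"n": 1},
+//	// }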
+
+// sliceArrayEncoder is an ArrayEncoder backed by a simple []interface{}. Like
+// the MapObjectEncoder, it's not designed for production use.
+type sliceArrayEncoder struct {
+ elems []interface{}
+}
+
+func (s *sliceArrayEncoder) AppendArray(v ArrayMarshaler) error {
+ enc := &sliceArrayEncoder{}
+ err := v.MarshalLogArray(enc)
+ s.elems = append(s.elems, enc.elems)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendObject(v ObjectMarshaler) error {
+ m := NewMapObjectEncoder()
+ err := v.MarshalLogObject(m)
+ s.elems = append(s.elems, m.Fields)
+ return err
+}
+
+func (s *sliceArrayEncoder) AppendReflected(v interface{}) error {
+ s.elems = append(s.elems, v)
+ return nil
+}
+
+func (s *sliceArrayEncoder) AppendBool(v bool) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendByteString(v []byte) { s.elems = append(s.elems, string(v)) }
+func (s *sliceArrayEncoder) AppendComplex128(v complex128) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendComplex64(v complex64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendDuration(v time.Duration) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat64(v float64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendFloat32(v float32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt(v int) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt64(v int64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt32(v int32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt16(v int16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendInt8(v int8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendString(v string) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendTime(v time.Time) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint(v uint) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint64(v uint64) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint32(v uint32) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint16(v uint16) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUint8(v uint8) { s.elems = append(s.elems, v) }
+func (s *sliceArrayEncoder) AppendUintptr(v uintptr) { s.elems = append(s.elems, v) }
diff --git a/vendor/go.uber.org/zap/zapcore/sampler.go b/vendor/go.uber.org/zap/zapcore/sampler.go
new file mode 100644
index 0000000..25f10ca
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/sampler.go
@@ -0,0 +1,208 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "time"
+
+ "go.uber.org/atomic"
+)
+
+const (
+ _numLevels = _maxLevel - _minLevel + 1
+ _countersPerLevel = 4096
+)
+
+type counter struct {
+ resetAt atomic.Int64
+ counter atomic.Uint64
+}
+
+type counters [_numLevels][_countersPerLevel]counter
+
+func newCounters() *counters {
+ return &counters{}
+}
+
+func (cs *counters) get(lvl Level, key string) *counter {
+ i := lvl - _minLevel
+ j := fnv32a(key) % _countersPerLevel
+ return &cs[i][j]
+}
+
+// fnv32a is adapted from "hash/fnv", but avoids a []byte(string) allocation.
+func fnv32a(s string) uint32 {
+ const (
+ offset32 = 2166136261
+ prime32 = 16777619
+ )
+ hash := uint32(offset32)
+ for i := 0; i < len(s); i++ {
+ hash ^= uint32(s[i])
+ hash *= prime32
+ }
+ return hash
+}
+
+func (c *counter) IncCheckReset(t time.Time, tick time.Duration) uint64 {
+ tn := t.UnixNano()
+ resetAfter := c.resetAt.Load()
+ if resetAfter > tn {
+ return c.counter.Inc()
+ }
+
+ c.counter.Store(1)
+
+ newResetAfter := tn + tick.Nanoseconds()
+ if !c.resetAt.CAS(resetAfter, newResetAfter) {
+ // We raced with another goroutine trying to reset, and it also reset
+ // the counter to 1, so we need to reincrement the counter.
+ return c.counter.Inc()
+ }
+
+ return 1
+}
+
+// SamplingDecision is a decision made by a sampler, represented as a bit
+// field. More decisions may be added in the future.
+type SamplingDecision uint32
+
+const (
+ // LogDropped indicates that the Sampler dropped a log entry.
+ LogDropped SamplingDecision = 1 << iota
+ // LogSampled indicates that the Sampler sampled a log entry.
+ LogSampled
+)
+
+// optionFunc wraps a func so it satisfies the SamplerOption interface.
+type optionFunc func(*sampler)
+
+func (f optionFunc) apply(s *sampler) {
+ f(s)
+}
+
+// SamplerOption configures a Sampler.
+type SamplerOption interface {
+ apply(*sampler)
+}
+
+// nopSamplingHook is the default hook used by sampler.
+func nopSamplingHook(Entry, SamplingDecision) {}
+
+// SamplerHook registers a function which will be called when Sampler makes a
+// decision.
+//
+// This hook may be used to get visibility into the performance of the sampler.
+// For example, use it to track metrics of dropped versus sampled logs.
+//
+// var dropped atomic.Int64
+// zapcore.SamplerHook(func(ent zapcore.Entry, dec zapcore.SamplingDecision) {
+// if dec&zapcore.LogDropped > 0 {
+// dropped.Inc()
+// }
+// })
+func SamplerHook(hook func(entry Entry, dec SamplingDecision)) SamplerOption {
+ return optionFunc(func(s *sampler) {
+ s.hook = hook
+ })
+}
+
+// NewSamplerWithOptions creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Sampler can be configured to report sampling decisions with the SamplerHook
+// option.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+func NewSamplerWithOptions(core Core, tick time.Duration, first, thereafter int, opts ...SamplerOption) Core {
+ s := &sampler{
+ Core: core,
+ tick: tick,
+ counts: newCounters(),
+ first: uint64(first),
+ thereafter: uint64(thereafter),
+ hook: nopSamplingHook,
+ }
+ for _, opt := range opts {
+ opt.apply(s)
+ }
+
+ return s
+}
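+
+// Illustrative sketch (base is any existing Core): with tick=time.Second,
+// first=100 and thereafter=10, the first 100 entries per second sharing a
+// level and message pass through, then every 10th duplicate passes until the
+// tick resets:
+//
+//	sampled := NewSamplerWithOptions(base, time.Second, 100, 10,
+//		SamplerHook(func(ent Entry, dec SamplingDecision) {
+//			// e.g. increment a metric when dec&LogDropped != 0
+//		}))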
+
+type sampler struct {
+ Core
+
+ counts *counters
+ tick time.Duration
+ first, thereafter uint64
+ hook func(Entry, SamplingDecision)
+}
+
+// NewSampler creates a Core that samples incoming entries, which
+// caps the CPU and I/O load of logging while attempting to preserve a
+// representative subset of your logs.
+//
+// Zap samples by logging the first N entries with a given level and message
+// each tick. If more Entries with the same level and message are seen during
+// the same interval, every Mth message is logged and the rest are dropped.
+//
+// Keep in mind that zap's sampling implementation is optimized for speed over
+// absolute precision; under load, each tick may be slightly over- or
+// under-sampled.
+//
+// Deprecated: use NewSamplerWithOptions.
+func NewSampler(core Core, tick time.Duration, first, thereafter int) Core {
+ return NewSamplerWithOptions(core, tick, first, thereafter)
+}
+
+func (s *sampler) With(fields []Field) Core {
+ return &sampler{
+ Core: s.Core.With(fields),
+ tick: s.tick,
+ counts: s.counts,
+ first: s.first,
+ thereafter: s.thereafter,
+ hook: s.hook,
+ }
+}
+
+func (s *sampler) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ if !s.Enabled(ent.Level) {
+ return ce
+ }
+
+ counter := s.counts.get(ent.Level, ent.Message)
+ n := counter.IncCheckReset(ent.Time, s.tick)
+ if n > s.first && (n-s.first)%s.thereafter != 0 {
+ s.hook(ent, LogDropped)
+ return ce
+ }
+ s.hook(ent, LogSampled)
+ return s.Core.Check(ent, ce)
+}
diff --git a/vendor/go.uber.org/zap/zapcore/tee.go b/vendor/go.uber.org/zap/zapcore/tee.go
new file mode 100644
index 0000000..07a32ee
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/tee.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import "go.uber.org/multierr"
+
+type multiCore []Core
+
+// NewTee creates a Core that duplicates log entries into two or more
+// underlying Cores.
+//
+// Calling it with a single Core returns the input unchanged, and calling
+// it with no input returns a no-op Core.
+func NewTee(cores ...Core) Core {
+ switch len(cores) {
+ case 0:
+ return NewNopCore()
+ case 1:
+ return cores[0]
+ default:
+ return multiCore(cores)
+ }
+}
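+
+// Illustrative sketch (consoleCore and fileCore are any existing Cores): a
+// teed Core forwards every entry to all children and is enabled at a level
+// if any child is:
+//
+//	core := NewTee(consoleCore, fileCore)
+//	// core.Write sends each entry to both children and joins their errors;
+//	// core.Enabled(DebugLevel) is true if either child enables Debug.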
+
+func (mc multiCore) With(fields []Field) Core {
+ clone := make(multiCore, len(mc))
+ for i := range mc {
+ clone[i] = mc[i].With(fields)
+ }
+ return clone
+}
+
+func (mc multiCore) Enabled(lvl Level) bool {
+ for i := range mc {
+ if mc[i].Enabled(lvl) {
+ return true
+ }
+ }
+ return false
+}
+
+func (mc multiCore) Check(ent Entry, ce *CheckedEntry) *CheckedEntry {
+ for i := range mc {
+ ce = mc[i].Check(ent, ce)
+ }
+ return ce
+}
+
+func (mc multiCore) Write(ent Entry, fields []Field) error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Write(ent, fields))
+ }
+ return err
+}
+
+func (mc multiCore) Sync() error {
+ var err error
+ for i := range mc {
+ err = multierr.Append(err, mc[i].Sync())
+ }
+ return err
+}
diff --git a/vendor/go.uber.org/zap/zapcore/write_syncer.go b/vendor/go.uber.org/zap/zapcore/write_syncer.go
new file mode 100644
index 0000000..d4a1af3
--- /dev/null
+++ b/vendor/go.uber.org/zap/zapcore/write_syncer.go
@@ -0,0 +1,122 @@
+// Copyright (c) 2016 Uber Technologies, Inc.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package zapcore
+
+import (
+ "io"
+ "sync"
+
+ "go.uber.org/multierr"
+)
+
+// A WriteSyncer is an io.Writer that can also flush any buffered data. Note
+// that *os.File (and thus, os.Stderr and os.Stdout) implements WriteSyncer.
+type WriteSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// AddSync converts an io.Writer to a WriteSyncer. It attempts to be
+// intelligent: if the concrete type of the io.Writer implements WriteSyncer,
+// we'll use the existing Sync method. If it doesn't, we'll add a no-op Sync.
+func AddSync(w io.Writer) WriteSyncer {
+ switch w := w.(type) {
+ case WriteSyncer:
+ return w
+ default:
+ return writerWrapper{w}
+ }
+}
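+
+// Illustrative sketch: bytes.Buffer has no Sync method, so AddSync wraps it
+// with a no-op Sync, while *os.File already implements WriteSyncer and is
+// returned unchanged:
+//
+//	var buf bytes.Buffer
+//	ws := AddSync(&buf)    // wrapped with a no-op Sync
+//	_ = AddSync(os.Stderr) // *os.File passes through as-is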
+
+type lockedWriteSyncer struct {
+ sync.Mutex
+ ws WriteSyncer
+}
+
+// Lock wraps a WriteSyncer in a mutex to make it safe for concurrent use. In
+// particular, *os.Files must be locked before use.
+func Lock(ws WriteSyncer) WriteSyncer {
+ if _, ok := ws.(*lockedWriteSyncer); ok {
+ // no need to layer on another lock
+ return ws
+ }
+ return &lockedWriteSyncer{ws: ws}
+}
+
+func (s *lockedWriteSyncer) Write(bs []byte) (int, error) {
+ s.Lock()
+ n, err := s.ws.Write(bs)
+ s.Unlock()
+ return n, err
+}
+
+func (s *lockedWriteSyncer) Sync() error {
+ s.Lock()
+ err := s.ws.Sync()
+ s.Unlock()
+ return err
+}
+
+type writerWrapper struct {
+ io.Writer
+}
+
+func (w writerWrapper) Sync() error {
+ return nil
+}
+
+type multiWriteSyncer []WriteSyncer
+
+// NewMultiWriteSyncer creates a WriteSyncer that duplicates its writes
+// and sync calls, much like io.MultiWriter.
+func NewMultiWriteSyncer(ws ...WriteSyncer) WriteSyncer {
+ if len(ws) == 1 {
+ return ws[0]
+ }
+ return multiWriteSyncer(ws)
+}
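+
+// Illustrative sketch (file is an *os.File opened by the caller): duplicating
+// log output to stderr and a file, with each writer locked for concurrent
+// use:
+//
+//	ws := NewMultiWriteSyncer(
+//		Lock(AddSync(os.Stderr)),
+//		Lock(AddSync(file)),
+//	)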
+
+// See https://golang.org/src/io/multi.go
+// When not all underlying syncers write the same number of bytes,
+// the smallest number is returned even though Write() is called on
+// all of them.
+func (ws multiWriteSyncer) Write(p []byte) (int, error) {
+ var writeErr error
+ nWritten := 0
+ for _, w := range ws {
+ n, err := w.Write(p)
+ writeErr = multierr.Append(writeErr, err)
+ if nWritten == 0 && n != 0 {
+ nWritten = n
+ } else if n < nWritten {
+ nWritten = n
+ }
+ }
+ return nWritten, writeErr
+}
+
+func (ws multiWriteSyncer) Sync() error {
+ var err error
+ for _, w := range ws {
+ err = multierr.Append(err, w.Sync())
+ }
+ return err
+}
diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE
new file mode 100644
index 0000000..2683e4b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/LICENSE
@@ -0,0 +1,50 @@
+
+This project is covered by two different licenses: MIT and Apache.
+
+#### MIT License ####
+
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original MIT license, with the additional
+copyright starting in 2011 when the project was ported over:
+
+ apic.go emitterc.go parserc.go readerc.go scannerc.go
+ writerc.go yamlh.go yamlprivateh.go
+
+Copyright (c) 2006-2010 Kirill Simonov
+Copyright (c) 2006-2011 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+### Apache License ###
+
+All the remaining project files are covered by the Apache license:
+
+Copyright (c) 2011-2019 Canonical Ltd
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/NOTICE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
new file mode 100644
index 0000000..08eb1ba
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/README.md
@@ -0,0 +1,150 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.2, but preserves some behavior
+from 1.1 for backwards compatibility.
+
+Specifically, as of v3 of the yaml package:
+
+ - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
+ decoded into a typed bool value. Otherwise they behave as a string. Booleans
+ in YAML 1.2 are _true/false_ only.
+ - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
+ as specified in YAML 1.2, because most parsers still use the old format.
+ Octals in the _0o777_ format are supported though, so new files work.
+ - Does not support base-60 floats. These are gone from YAML 1.2, and were
+ actually never supported by this package as it's clearly a poor choice.
+
+Beyond that, the package supports anchors, tags, and map merging.
+Multi-document unmarshalling is not yet implemented.
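+
+As a quick illustration of the YAML 1.1 boolean rule above (a sketch, not
+taken from the package docs), the same document decodes differently depending
+on the target type:
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/yaml.v3"
+)
+
+func main() {
+	doc := []byte("on: yes")
+
+	var typed struct {
+		On bool `yaml:"on"`
+	}
+	var untyped map[string]interface{}
+
+	_ = yaml.Unmarshal(doc, &typed)   // YAML 1.1 bool into a typed field
+	_ = yaml.Unmarshal(doc, &untyped) // same scalar into interface{}
+	fmt.Println(typed.On, untyped["on"]) // true yes
+}
+```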
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v3*.
+
+To install it, run:
+
+ go get gopkg.in/yaml.v3
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+ - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3)
+
+API stability
+-------------
+
+The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the MIT and Apache License 2.0 licenses.
+Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+ "fmt"
+ "log"
+
+ "gopkg.in/yaml.v3"
+)
+
+var data = `
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+`
+
+// Note: struct fields must be public in order for unmarshal to
+// correctly populate the data.
+type T struct {
+ A string
+ B struct {
+ RenamedC int `yaml:"c"`
+ D []int `yaml:",flow"`
+ }
+}
+
+func main() {
+ t := T{}
+
+ err := yaml.Unmarshal([]byte(data), &t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t:\n%v\n\n", t)
+
+ d, err := yaml.Marshal(&t)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+ m := make(map[interface{}]interface{})
+
+ err = yaml.Unmarshal([]byte(data), &m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m:\n%v\n\n", m)
+
+ d, err = yaml.Marshal(&m)
+ if err != nil {
+ log.Fatalf("error: %v", err)
+ }
+ fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+ c: 2
+ d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+ c: 2
+ d:
+ - 3
+ - 4
+```
+
diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go
new file mode 100644
index 0000000..ae7d049
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/apic.go
@@ -0,0 +1,747 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+ //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+ // Check if we can move the queue at the beginning of the buffer.
+ if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+ if parser.tokens_head != len(parser.tokens) {
+ copy(parser.tokens, parser.tokens[parser.tokens_head:])
+ }
+ parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+ parser.tokens_head = 0
+ }
+ parser.tokens = append(parser.tokens, *token)
+ if pos < 0 {
+ return
+ }
+ copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+ parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, input_raw_buffer_size),
+ buffer: make([]byte, 0, input_buffer_size),
+ }
+ return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+ n = copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+// Reader read handler.
+func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+ return parser.input_reader.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_string_read_handler
+ parser.input = input
+ parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
+ if parser.read_handler != nil {
+ panic("must set the input source only once")
+ }
+ parser.read_handler = yaml_reader_read_handler
+ parser.input_reader = r
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("must set the encoding only once")
+ }
+ parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, output_buffer_size),
+ raw_buffer: make([]byte, 0, output_raw_buffer_size),
+ states: make([]yaml_emitter_state_t, 0, initial_stack_size),
+ events: make([]yaml_event_t, 0, initial_queue_size),
+ best_width: -1,
+ }
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+// yaml_writer_write_handler uses emitter.output_writer to write the
+// emitted text.
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("must set the output target only once")
+ }
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("must set the output encoding only once")
+ }
+ emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+// assert(token); // Non-NULL token object expected.
+//
+// switch (token.type)
+// {
+// case YAML_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case YAML_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case YAML_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case YAML_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case YAML_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ }
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(
+ event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool,
+) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+// Create ALIAS.
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool {
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+ return true
+}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+ return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ }
+ return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ }
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives_start *yaml_tag_directive_t,
+// tag_directives_end *yaml_tag_directive_t,
+// start_implicit int, end_implicit int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// struct {
+// start *yaml_node_t
+// end *yaml_node_t
+// top *yaml_node_t
+// } nodes = { NULL, NULL, NULL }
+// version_directive_copy *yaml_version_directive_t = NULL
+// struct {
+// start *yaml_tag_directive_t
+// end *yaml_tag_directive_t
+// top *yaml_tag_directive_t
+// } tag_directives_copy = { NULL, NULL, NULL }
+// value yaml_tag_directive_t = { NULL, NULL }
+// mark yaml_mark_t = { 0, 0, 0 }
+//
+// assert(document) // Non-NULL document object is expected.
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end))
+// // Valid tag directives are expected.
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+// if (!version_directive_copy) goto error
+// version_directive_copy.major = version_directive.major
+// version_directive_copy.minor = version_directive.minor
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// tag_directive *yaml_tag_directive_t
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error
+// for (tag_directive = tag_directives_start
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle)
+// assert(tag_directive.prefix)
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error
+// value.handle = yaml_strdup(tag_directive.handle)
+// value.prefix = yaml_strdup(tag_directive.prefix)
+// if (!value.handle || !value.prefix) goto error
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error
+// value.handle = NULL
+// value.prefix = NULL
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark)
+//
+// return 1
+//
+//error:
+// STACK_DEL(&context, nodes)
+// yaml_free(version_directive_copy)
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+// }
+// STACK_DEL(&context, tag_directives_copy)
+// yaml_free(value.handle)
+// yaml_free(value.prefix)
+//
+// return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// tag_directive *yaml_tag_directive_t
+//
+// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// node yaml_node_t = POP(&context, document.nodes)
+// yaml_free(node.tag)
+// switch (node.type) {
+// case YAML_SCALAR_NODE:
+// yaml_free(node.data.scalar.value)
+// break
+// case YAML_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items)
+// break
+// case YAML_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs)
+// break
+// default:
+// assert(0) // Should not happen.
+// }
+// }
+// STACK_DEL(&context, document.nodes)
+//
+// yaml_free(document.version_directive)
+// for (tag_directive = document.tag_directives.start
+// tag_directive != document.tag_directives.end
+// tag_directive++) {
+// yaml_free(tag_directive.handle)
+// yaml_free(tag_directive.prefix)
+// }
+// yaml_free(document.tag_directives.start)
+//
+// memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1
+// }
+// return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+// assert(document) // Non-NULL document object is expected.
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start
+// }
+// return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+// tag *yaml_char_t, value *yaml_char_t, length int,
+// style yaml_scalar_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// value_copy *yaml_char_t = NULL
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+// assert(value) // Non-NULL value is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (length < 0) {
+// length = strlen((char *)value)
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error
+// value_copy = yaml_malloc(length+1)
+// if (!value_copy) goto error
+// memcpy(value_copy, value, length)
+// value_copy[length] = '\0'
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// yaml_free(tag_copy)
+// yaml_free(value_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_item_t
+// end *yaml_node_item_t
+// top *yaml_node_item_t
+// } items = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, items)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+// tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+// mark yaml_mark_t = { 0, 0, 0 }
+// tag_copy *yaml_char_t = NULL
+// struct {
+// start *yaml_node_pair_t
+// end *yaml_node_pair_t
+// top *yaml_node_pair_t
+// } pairs = { NULL, NULL, NULL }
+// node yaml_node_t
+//
+// assert(document) // Non-NULL document object is expected.
+//
+// if (!tag) {
+// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+// tag_copy = yaml_strdup(tag)
+// if (!tag_copy) goto error
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark)
+// if (!PUSH(&context, document.nodes, node)) goto error
+//
+// return document.nodes.top - document.nodes.start
+//
+//error:
+// STACK_DEL(&context, pairs)
+// yaml_free(tag_copy)
+//
+// return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+// sequence int, item int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// assert(document) // Non-NULL document is required.
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top)
+// // Valid sequence id is required.
+// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+// // A sequence node is required.
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+// // Valid item id is required.
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0
+//
+// return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+// mapping int, key int, value int)
+//{
+// struct {
+// error yaml_error_type_t
+// } context
+//
+// pair yaml_node_pair_t
+//
+// assert(document) // Non-NULL document is required.
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top)
+// // Valid mapping id is required.
+// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+// // A mapping node is required.
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+// // Valid key id is required.
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+// // Valid value id is required.
+//
+// pair.key = key
+// pair.value = value
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0
+//
+// return 1
+//}
+//
+//
diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go
new file mode 100644
index 0000000..df36e3a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/decode.go
@@ -0,0 +1,950 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "math"
+ "reflect"
+ "strconv"
+ "time"
+)
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+ parser yaml_parser_t
+ event yaml_event_t
+ doc *Node
+ anchors map[string]*Node
+ doneInit bool
+ textless bool
+}
+
+func newParser(b []byte) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ if len(b) == 0 {
+ b = []byte{'\n'}
+ }
+ yaml_parser_set_input_string(&p.parser, b)
+ return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+ p := parser{}
+ if !yaml_parser_initialize(&p.parser) {
+ panic("failed to initialize YAML emitter")
+ }
+ yaml_parser_set_input_reader(&p.parser, r)
+ return &p
+}
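+
+// Illustrative sketch (assumed in-package caller, not upstream API): both
+// constructors above feed the same event loop, so a node tree can be
+// obtained roughly as follows:
+//
+//    p := newParser([]byte("a: 1\n"))
+//    defer p.destroy()
+//    doc := p.parse() // *Node with Kind == DocumentNode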
+
+func (p *parser) init() {
+ if p.doneInit {
+ return
+ }
+ p.anchors = make(map[string]*Node)
+ p.expect(yaml_STREAM_START_EVENT)
+ p.doneInit = true
+}
+
+func (p *parser) destroy() {
+ if p.event.typ != yaml_NO_EVENT {
+ yaml_event_delete(&p.event)
+ }
+ yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+ if p.event.typ == yaml_NO_EVENT {
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ }
+ if p.event.typ == yaml_STREAM_END_EVENT {
+ failf("attempted to go past the end of stream; corrupted value?")
+ }
+ if p.event.typ != e {
+ p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+ p.fail()
+ }
+ yaml_event_delete(&p.event)
+ p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+ if p.event.typ != yaml_NO_EVENT {
+ return p.event.typ
+ }
+ if !yaml_parser_parse(&p.parser, &p.event) {
+ p.fail()
+ }
+ return p.event.typ
+}
+
+func (p *parser) fail() {
+ var where string
+ var line int
+ if p.parser.context_mark.line != 0 {
+ line = p.parser.context_mark.line
+ // Scanner errors don't increment the line before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ } else if p.parser.problem_mark.line != 0 {
+ line = p.parser.problem_mark.line
+ // Scanner errors don't increment the line before returning the error.
+ if p.parser.error == yaml_SCANNER_ERROR {
+ line++
+ }
+ }
+ if line != 0 {
+ where = "line " + strconv.Itoa(line) + ": "
+ }
+ var msg string
+ if len(p.parser.problem) > 0 {
+ msg = p.parser.problem
+ } else {
+ msg = "unknown problem parsing YAML content"
+ }
+ failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *Node, anchor []byte) {
+ if anchor != nil {
+ n.Anchor = string(anchor)
+ p.anchors[n.Anchor] = n
+ }
+}
+
+func (p *parser) parse() *Node {
+ p.init()
+ switch p.peek() {
+ case yaml_SCALAR_EVENT:
+ return p.scalar()
+ case yaml_ALIAS_EVENT:
+ return p.alias()
+ case yaml_MAPPING_START_EVENT:
+ return p.mapping()
+ case yaml_SEQUENCE_START_EVENT:
+ return p.sequence()
+ case yaml_DOCUMENT_START_EVENT:
+ return p.document()
+ case yaml_STREAM_END_EVENT:
+ // Happens when attempting to decode an empty buffer.
+ return nil
+ case yaml_TAIL_COMMENT_EVENT:
+ panic("internal error: unexpected tail comment event (please report)")
+ default:
+ panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String())
+ }
+}
+
+func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node {
+ var style Style
+ if tag != "" && tag != "!" {
+ tag = shortTag(tag)
+ style = TaggedStyle
+ } else if defaultTag != "" {
+ tag = defaultTag
+ } else if kind == ScalarNode {
+ tag, _ = resolve("", value)
+ }
+ n := &Node{
+ Kind: kind,
+ Tag: tag,
+ Value: value,
+ Style: style,
+ }
+ if !p.textless {
+ n.Line = p.event.start_mark.line + 1
+ n.Column = p.event.start_mark.column + 1
+ n.HeadComment = string(p.event.head_comment)
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ }
+ return n
+}
+
+func (p *parser) parseChild(parent *Node) *Node {
+ child := p.parse()
+ parent.Content = append(parent.Content, child)
+ return child
+}
+
+func (p *parser) document() *Node {
+ n := p.node(DocumentNode, "", "", "")
+ p.doc = n
+ p.expect(yaml_DOCUMENT_START_EVENT)
+ p.parseChild(n)
+ if p.peek() == yaml_DOCUMENT_END_EVENT {
+ n.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_DOCUMENT_END_EVENT)
+ return n
+}
+
+func (p *parser) alias() *Node {
+ n := p.node(AliasNode, "", "", string(p.event.anchor))
+ n.Alias = p.anchors[n.Value]
+ if n.Alias == nil {
+ failf("unknown anchor '%s' referenced", n.Value)
+ }
+ p.expect(yaml_ALIAS_EVENT)
+ return n
+}
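+
+// For example, in "a: &x 1\nb: *x" the alias node for *x has its Alias
+// field pointed at the anchored scalar node for "&x 1" via p.anchors.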
+
+func (p *parser) scalar() *Node {
+ var parsedStyle = p.event.scalar_style()
+ var nodeStyle Style
+ switch {
+ case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = DoubleQuotedStyle
+ case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0:
+ nodeStyle = SingleQuotedStyle
+ case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0:
+ nodeStyle = LiteralStyle
+ case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0:
+ nodeStyle = FoldedStyle
+ }
+ var nodeValue = string(p.event.value)
+ var nodeTag = string(p.event.tag)
+ var defaultTag string
+ if nodeStyle == 0 {
+ if nodeValue == "<<" {
+ defaultTag = mergeTag
+ }
+ } else {
+ defaultTag = strTag
+ }
+ n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue)
+ n.Style |= nodeStyle
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SCALAR_EVENT)
+ return n
+}
+
+func (p *parser) sequence() *Node {
+ n := p.node(SequenceNode, seqTag, string(p.event.tag), "")
+ if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 {
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_SEQUENCE_START_EVENT)
+ for p.peek() != yaml_SEQUENCE_END_EVENT {
+ p.parseChild(n)
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ p.expect(yaml_SEQUENCE_END_EVENT)
+ return n
+}
+
+func (p *parser) mapping() *Node {
+ n := p.node(MappingNode, mapTag, string(p.event.tag), "")
+ block := true
+ if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 {
+ block = false
+ n.Style |= FlowStyle
+ }
+ p.anchor(n, p.event.anchor)
+ p.expect(yaml_MAPPING_START_EVENT)
+ for p.peek() != yaml_MAPPING_END_EVENT {
+ k := p.parseChild(n)
+ if block && k.FootComment != "" {
+ // Must be a foot comment for the prior value when being dedented.
+ if len(n.Content) > 2 {
+ n.Content[len(n.Content)-3].FootComment = k.FootComment
+ k.FootComment = ""
+ }
+ }
+ v := p.parseChild(n)
+ if k.FootComment == "" && v.FootComment != "" {
+ k.FootComment = v.FootComment
+ v.FootComment = ""
+ }
+ if p.peek() == yaml_TAIL_COMMENT_EVENT {
+ if k.FootComment == "" {
+ k.FootComment = string(p.event.foot_comment)
+ }
+ p.expect(yaml_TAIL_COMMENT_EVENT)
+ }
+ }
+ n.LineComment = string(p.event.line_comment)
+ n.FootComment = string(p.event.foot_comment)
+ if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 {
+ n.Content[len(n.Content)-2].FootComment = n.FootComment
+ n.FootComment = ""
+ }
+ p.expect(yaml_MAPPING_END_EVENT)
+ return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+ doc *Node
+ aliases map[*Node]bool
+ terrors []string
+
+ stringMapType reflect.Type
+ generalMapType reflect.Type
+
+ knownFields bool
+ uniqueKeys bool
+ decodeCount int
+ aliasCount int
+ aliasDepth int
+}
+
+var (
+ nodeType = reflect.TypeOf(Node{})
+ durationType = reflect.TypeOf(time.Duration(0))
+ stringMapType = reflect.TypeOf(map[string]interface{}{})
+ generalMapType = reflect.TypeOf(map[interface{}]interface{}{})
+ ifaceType = generalMapType.Elem()
+ timeType = reflect.TypeOf(time.Time{})
+ ptrTimeType = reflect.TypeOf(&time.Time{})
+)
+
+func newDecoder() *decoder {
+ d := &decoder{
+ stringMapType: stringMapType,
+ generalMapType: generalMapType,
+ uniqueKeys: true,
+ }
+ d.aliases = make(map[*Node]bool)
+ return d
+}
+
+func (d *decoder) terror(n *Node, tag string, out reflect.Value) {
+ if n.Tag != "" {
+ tag = n.Tag
+ }
+ value := n.Value
+ if tag != seqTag && tag != mapTag {
+ if len(value) > 10 {
+ value = " `" + value[:7] + "...`"
+ } else {
+ value = " `" + value + "`"
+ }
+ }
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) {
+ err := u.UnmarshalYAML(n)
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) {
+ terrlen := len(d.terrors)
+ err := u.UnmarshalYAML(func(v interface{}) (err error) {
+ defer handleErr(&err)
+ d.unmarshal(n, reflect.ValueOf(v))
+ if len(d.terrors) > terrlen {
+ issues := d.terrors[terrlen:]
+ d.terrors = d.terrors[:terrlen]
+ return &TypeError{issues}
+ }
+ return nil
+ })
+ if e, ok := err.(*TypeError); ok {
+ d.terrors = append(d.terrors, e.Errors...)
+ return false
+ }
+ if err != nil {
+ fail(err)
+ }
+ return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// it succeeded for the target type.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+ if n.ShortTag() == nullTag {
+ return out, false, false
+ }
+ again := true
+ for again {
+ again = false
+ if out.Kind() == reflect.Ptr {
+ if out.IsNil() {
+ out.Set(reflect.New(out.Type().Elem()))
+ }
+ out = out.Elem()
+ again = true
+ }
+ if out.CanAddr() {
+ outi := out.Addr().Interface()
+ if u, ok := outi.(Unmarshaler); ok {
+ good = d.callUnmarshaler(n, u)
+ return out, true, good
+ }
+ if u, ok := outi.(obsoleteUnmarshaler); ok {
+ good = d.callObsoleteUnmarshaler(n, u)
+ return out, true, good
+ }
+ }
+ }
+ return out, false, false
+}
+
+func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) {
+ if n.ShortTag() == nullTag {
+ return reflect.Value{}
+ }
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+const (
+ // 400,000 decode operations is ~500kb of dense object declarations, or
+ // ~5kb of dense object declarations with 10000% alias expansion
+ alias_ratio_range_low = 400000
+
+ // 4,000,000 decode operations is ~5MB of dense object declarations, or
+ // ~4.5MB of dense object declarations with 10% alias expansion
+ alias_ratio_range_high = 4000000
+
+ // alias_ratio_range is the range over which we scale allowed alias ratios
+ alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
+)
+
+func allowedAliasRatio(decodeCount int) float64 {
+ switch {
+ case decodeCount <= alias_ratio_range_low:
+ // allow 99% to come from alias expansion for small-to-medium documents
+ return 0.99
+ case decodeCount >= alias_ratio_range_high:
+ // allow 10% to come from alias expansion for very large documents
+ return 0.10
+ default:
+ // scale smoothly from 99% down to 10% over the range.
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
+ // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
+ return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
+ }
+}
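+
+// Worked example of the scaling above: with decodeCount = 2,200,000 the
+// allowed ratio is 0.99 - 0.89*(1,800,000/3,600,000) = 0.545, i.e. roughly
+// half of all decode operations may be alias-driven at that document size.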
+
+func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) {
+ d.decodeCount++
+ if d.aliasDepth > 0 {
+ d.aliasCount++
+ }
+ if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
+ failf("document contains excessive aliasing")
+ }
+ if out.Type() == nodeType {
+ out.Set(reflect.ValueOf(n).Elem())
+ return true
+ }
+ switch n.Kind {
+ case DocumentNode:
+ return d.document(n, out)
+ case AliasNode:
+ return d.alias(n, out)
+ }
+ out, unmarshaled, good := d.prepare(n, out)
+ if unmarshaled {
+ return good
+ }
+ switch n.Kind {
+ case ScalarNode:
+ good = d.scalar(n, out)
+ case MappingNode:
+ good = d.mapping(n, out)
+ case SequenceNode:
+ good = d.sequence(n, out)
+ case 0:
+ if n.IsZero() {
+ return d.null(out)
+ }
+ fallthrough
+ default:
+ failf("cannot decode node with unknown kind %d", n.Kind)
+ }
+ return good
+}
+
+func (d *decoder) document(n *Node, out reflect.Value) (good bool) {
+ if len(n.Content) == 1 {
+ d.doc = n
+ d.unmarshal(n.Content[0], out)
+ return true
+ }
+ return false
+}
+
+func (d *decoder) alias(n *Node, out reflect.Value) (good bool) {
+ if d.aliases[n] {
+ // TODO this could actually be allowed in some circumstances.
+ failf("anchor '%s' value contains itself", n.Value)
+ }
+ d.aliases[n] = true
+ d.aliasDepth++
+ good = d.unmarshal(n.Alias, out)
+ d.aliasDepth--
+ delete(d.aliases, n)
+ return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+ for _, k := range out.MapKeys() {
+ out.SetMapIndex(k, zeroValue)
+ }
+}
+
+func (d *decoder) null(out reflect.Value) bool {
+ if out.CanAddr() {
+ switch out.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+ out.Set(reflect.Zero(out.Type()))
+ return true
+ }
+ }
+ return false
+}
+
+func (d *decoder) scalar(n *Node, out reflect.Value) bool {
+ var tag string
+ var resolved interface{}
+ if n.indicatedString() {
+ tag = strTag
+ resolved = n.Value
+ } else {
+ tag, resolved = resolve(n.Tag, n.Value)
+ if tag == binaryTag {
+ data, err := base64.StdEncoding.DecodeString(resolved.(string))
+ if err != nil {
+ failf("!!binary value contains invalid base64 data")
+ }
+ resolved = string(data)
+ }
+ }
+ if resolved == nil {
+ return d.null(out)
+ }
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ // We've resolved to exactly the type we want, so use that.
+ out.Set(resolvedv)
+ return true
+ }
+ // Perhaps we can use the value as a TextUnmarshaler to
+ // set its value.
+ if out.CanAddr() {
+ u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
+ if ok {
+ var text []byte
+ if tag == binaryTag {
+ text = []byte(resolved.(string))
+ } else {
+ // We let any value be unmarshaled into TextUnmarshaler.
+ // That might be more lax than we'd like, but the
+ // TextUnmarshaler itself should rule out any dubious values.
+ text = []byte(n.Value)
+ }
+ err := u.UnmarshalText(text)
+ if err != nil {
+ fail(err)
+ }
+ return true
+ }
+ }
+ switch out.Kind() {
+ case reflect.String:
+ if tag == binaryTag {
+ out.SetString(resolved.(string))
+ return true
+ }
+ out.SetString(n.Value)
+ return true
+ case reflect.Interface:
+ out.Set(reflect.ValueOf(resolved))
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ // This used to work in v2, but it's very unfriendly.
+ isDuration := out.Type() == durationType
+
+ switch resolved := resolved.(type) {
+ case int:
+ if !isDuration && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case int64:
+ if !isDuration && !out.OverflowInt(resolved) {
+ out.SetInt(resolved)
+ return true
+ }
+ case uint64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case float64:
+ if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+ out.SetInt(int64(resolved))
+ return true
+ }
+ case string:
+ if out.Type() == durationType {
+ d, err := time.ParseDuration(resolved)
+ if err == nil {
+ out.SetInt(int64(d))
+ return true
+ }
+ }
+ }
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ switch resolved := resolved.(type) {
+ case int:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case int64:
+ if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case uint64:
+ if !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ case float64:
+ if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+ out.SetUint(uint64(resolved))
+ return true
+ }
+ }
+ case reflect.Bool:
+ switch resolved := resolved.(type) {
+ case bool:
+ out.SetBool(resolved)
+ return true
+ case string:
+ // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html).
+ // It only works if explicitly attempting to unmarshal into a typed bool value.
+ switch resolved {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON":
+ out.SetBool(true)
+ return true
+ case "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ out.SetBool(false)
+ return true
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ switch resolved := resolved.(type) {
+ case int:
+ out.SetFloat(float64(resolved))
+ return true
+ case int64:
+ out.SetFloat(float64(resolved))
+ return true
+ case uint64:
+ out.SetFloat(float64(resolved))
+ return true
+ case float64:
+ out.SetFloat(resolved)
+ return true
+ }
+ case reflect.Struct:
+ if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+ out.Set(resolvedv)
+ return true
+ }
+ case reflect.Ptr:
+ panic("yaml internal error: please report the issue")
+ }
+ d.terror(n, tag, out)
+ return false
+}
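+
+// Illustrative sketch (hypothetical caller, using the package's public
+// Unmarshal): the branches above are what make conversions like these work:
+//
+//    var n int
+//    yaml.Unmarshal([]byte("123"), &n) // int branch: n == 123
+//    var d time.Duration
+//    yaml.Unmarshal([]byte("3s"), &d)  // string branch: d == 3 * time.Second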
+
+func settableValueOf(i interface{}) reflect.Value {
+ v := reflect.ValueOf(i)
+ sv := reflect.New(v.Type()).Elem()
+ sv.Set(v)
+ return sv
+}
+
+func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+
+ var iface reflect.Value
+ switch out.Kind() {
+ case reflect.Slice:
+ out.Set(reflect.MakeSlice(out.Type(), l, l))
+ case reflect.Array:
+ if l != out.Len() {
+ failf("invalid array: want %d elements but got %d", out.Len(), l)
+ }
+ case reflect.Interface:
+ // No type hints. Will have to use a generic sequence.
+ iface = out
+ out = settableValueOf(make([]interface{}, l))
+ default:
+ d.terror(n, seqTag, out)
+ return false
+ }
+ et := out.Type().Elem()
+
+ j := 0
+ for i := 0; i < l; i++ {
+ e := reflect.New(et).Elem()
+ if ok := d.unmarshal(n.Content[i], e); ok {
+ out.Index(j).Set(e)
+ j++
+ }
+ }
+ if out.Kind() != reflect.Array {
+ out.Set(out.Slice(0, j))
+ }
+ if iface.IsValid() {
+ iface.Set(out)
+ }
+ return true
+}
+
+func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) {
+ l := len(n.Content)
+ if d.uniqueKeys {
+ nerrs := len(d.terrors)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ for j := i + 2; j < l; j += 2 {
+ nj := n.Content[j]
+ if ni.Kind == nj.Kind && ni.Value == nj.Value {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line))
+ }
+ }
+ }
+ if len(d.terrors) > nerrs {
+ return false
+ }
+ }
+ switch out.Kind() {
+ case reflect.Struct:
+ return d.mappingStruct(n, out)
+ case reflect.Map:
+ // okay
+ case reflect.Interface:
+ iface := out
+ if isStringMap(n) {
+ out = reflect.MakeMap(d.stringMapType)
+ } else {
+ out = reflect.MakeMap(d.generalMapType)
+ }
+ iface.Set(out)
+ default:
+ d.terror(n, mapTag, out)
+ return false
+ }
+
+ outt := out.Type()
+ kt := outt.Key()
+ et := outt.Elem()
+
+ stringMapType := d.stringMapType
+ generalMapType := d.generalMapType
+ if outt.Elem() == ifaceType {
+ if outt.Key().Kind() == reflect.String {
+ d.stringMapType = outt
+ } else if outt.Key() == ifaceType {
+ d.generalMapType = outt
+ }
+ }
+
+ mapIsNew := false
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(outt))
+ mapIsNew = true
+ }
+ for i := 0; i < l; i += 2 {
+ if isMerge(n.Content[i]) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ k := reflect.New(kt).Elem()
+ if d.unmarshal(n.Content[i], k) {
+ kkind := k.Kind()
+ if kkind == reflect.Interface {
+ kkind = k.Elem().Kind()
+ }
+ if kkind == reflect.Map || kkind == reflect.Slice {
+ failf("invalid map key: %#v", k.Interface())
+ }
+ e := reflect.New(et).Elem()
+ if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) {
+ out.SetMapIndex(k, e)
+ }
+ }
+ }
+ d.stringMapType = stringMapType
+ d.generalMapType = generalMapType
+ return true
+}
+
+func isStringMap(n *Node) bool {
+ if n.Kind != MappingNode {
+ return false
+ }
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ if n.Content[i].ShortTag() != strTag {
+ return false
+ }
+ }
+ return true
+}
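+
+// For example: decoding "a: 1" into an interface{} yields
+// map[string]interface{}, while "1: a" falls back to
+// map[interface{}]interface{} because its key is not a string scalar.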
+
+func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) {
+ sinfo, err := getStructInfo(out.Type())
+ if err != nil {
+ panic(err)
+ }
+
+ var inlineMap reflect.Value
+ var elemType reflect.Type
+ if sinfo.InlineMap != -1 {
+ inlineMap = out.Field(sinfo.InlineMap)
+ inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+ elemType = inlineMap.Type().Elem()
+ }
+
+ for _, index := range sinfo.InlineUnmarshalers {
+ field := d.fieldByIndex(n, out, index)
+ d.prepare(n, field)
+ }
+
+ var doneFields []bool
+ if d.uniqueKeys {
+ doneFields = make([]bool, len(sinfo.FieldsList))
+ }
+ name := settableValueOf("")
+ l := len(n.Content)
+ for i := 0; i < l; i += 2 {
+ ni := n.Content[i]
+ if isMerge(ni) {
+ d.merge(n.Content[i+1], out)
+ continue
+ }
+ if !d.unmarshal(ni, name) {
+ continue
+ }
+ if info, ok := sinfo.FieldsMap[name.String()]; ok {
+ if d.uniqueKeys {
+ if doneFields[info.Id] {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type()))
+ continue
+ }
+ doneFields[info.Id] = true
+ }
+ var field reflect.Value
+ if info.Inline == nil {
+ field = out.Field(info.Num)
+ } else {
+ field = d.fieldByIndex(n, out, info.Inline)
+ }
+ d.unmarshal(n.Content[i+1], field)
+ } else if sinfo.InlineMap != -1 {
+ if inlineMap.IsNil() {
+ inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+ }
+ value := reflect.New(elemType).Elem()
+ d.unmarshal(n.Content[i+1], value)
+ inlineMap.SetMapIndex(name, value)
+ } else if d.knownFields {
+ d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type()))
+ }
+ }
+ return true
+}
+
+func failWantMap() {
+ failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *Node, out reflect.Value) {
+ switch n.Kind {
+ case MappingNode:
+ d.unmarshal(n, out)
+ case AliasNode:
+ if n.Alias != nil && n.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(n, out)
+ case SequenceNode:
+ // Step backwards as earlier nodes take precedence.
+ for i := len(n.Content) - 1; i >= 0; i-- {
+ ni := n.Content[i]
+ if ni.Kind == AliasNode {
+ if ni.Alias != nil && ni.Alias.Kind != MappingNode {
+ failWantMap()
+ }
+ } else if ni.Kind != MappingNode {
+ failWantMap()
+ }
+ d.unmarshal(ni, out)
+ }
+ default:
+ failWantMap()
+ }
+}
+
+func isMerge(n *Node) bool {
+ return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag)
+}
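+
+// Illustrative sketch of the merge handling above: decoding
+//
+//    base: &b {a: 1}
+//    derived:
+//      <<: *b
+//      c: 2
+//
+// yields derived == {a: 1, c: 2}. When "<<" holds a sequence of maps,
+// earlier entries take precedence, hence the backwards iteration in
+// (*decoder).merge.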
diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go
new file mode 100644
index 0000000..0f47c9c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/emitterc.go
@@ -0,0 +1,2020 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) {
+ return yaml_emitter_flush(emitter)
+ }
+ return true
+}
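+
+// Note on the +5 headroom used in flush, put, put_break and write below: it
+// keeps enough free space for the widest UTF-8 sequence (4 bytes) or a CRLF
+// break to be written before the buffer is checked again.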
+
+// Put a character into the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.buffer[emitter.buffer_pos] = value
+ emitter.buffer_pos++
+ emitter.column++
+ return true
+}
+
+// Put a line break into the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ switch emitter.line_break {
+ case yaml_CR_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\r'
+ emitter.buffer_pos += 1
+ case yaml_LN_BREAK:
+ emitter.buffer[emitter.buffer_pos] = '\n'
+ emitter.buffer_pos += 1
+ case yaml_CRLN_BREAK:
+ emitter.buffer[emitter.buffer_pos+0] = '\r'
+ emitter.buffer[emitter.buffer_pos+1] = '\n'
+ emitter.buffer_pos += 2
+ default:
+ panic("unknown line break setting")
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and below and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+ return false
+ }
+ p := emitter.buffer_pos
+ w := width(s[*i])
+ switch w {
+ case 4:
+ emitter.buffer[p+3] = s[*i+3]
+ fallthrough
+ case 3:
+ emitter.buffer[p+2] = s[*i+2]
+ fallthrough
+ case 2:
+ emitter.buffer[p+1] = s[*i+1]
+ fallthrough
+ case 1:
+ emitter.buffer[p+0] = s[*i+0]
+ default:
+ panic("unknown character width")
+ }
+ emitter.column++
+ emitter.buffer_pos += w
+ *i += w
+ return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+ for i := 0; i < len(s); {
+ if !write(emitter, s, &i) {
+ return false
+ }
+ }
+ return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+ if s[*i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ *i++
+ } else {
+ if !write(emitter, s, i) {
+ return false
+ }
+ if emitter.column == 0 {
+ emitter.space_above = true
+ }
+ emitter.column = 0
+ emitter.line++
+ // [Go] Do this here and above and drop from everywhere else (see commented lines).
+ emitter.indention = true
+ }
+ return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_EMITTER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.events = append(emitter.events, *event)
+ for !yaml_emitter_need_more_events(emitter) {
+ event := &emitter.events[emitter.events_head]
+ if !yaml_emitter_analyze_event(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_state_machine(emitter, event) {
+ return false
+ }
+ yaml_event_delete(event)
+ emitter.events_head++
+ }
+ return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+// - 1 event for DOCUMENT-START
+// - 2 events for SEQUENCE-START
+// - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+ if emitter.events_head == len(emitter.events) {
+ return true
+ }
+ var accumulate int
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_DOCUMENT_START_EVENT:
+ accumulate = 1
+ case yaml_SEQUENCE_START_EVENT:
+ accumulate = 2
+ case yaml_MAPPING_START_EVENT:
+ accumulate = 3
+ default:
+ return false
+ }
+ if len(emitter.events)-emitter.events_head > accumulate {
+ return false
+ }
+ var level int
+ for i := emitter.events_head; i < len(emitter.events); i++ {
+ switch emitter.events[i].typ {
+ case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+ level++
+ case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+ level--
+ }
+ if level == 0 {
+ return false
+ }
+ }
+ return true
+}
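+
+// For example, after a MAPPING-START the emitter buffers up to three more
+// events (say a key scalar, a value scalar, and MAPPING-END) so checks such
+// as "is this mapping empty?" can run before anything is written.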
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+ }
+ }
+
+ // [Go] Do we actually need to copy this given garbage collection
+ // and the lack of deallocating destructors?
+ tag_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(tag_copy.handle, value.handle)
+ copy(tag_copy.prefix, value.prefix)
+ emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+ return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+ emitter.indents = append(emitter.indents, emitter.indent)
+ if emitter.indent < 0 {
+ if flow {
+ emitter.indent = emitter.best_indent
+ } else {
+ emitter.indent = 0
+ }
+ } else if !indentless {
+ // [Go] This was changed so that indentations are more regular.
+ if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE {
+ // The first indent inside a sequence will just skip the "- " indicator.
+ emitter.indent += 2
+ } else {
+ // Everything else aligns to the chosen indentation.
+ emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent)
+ }
+ }
+ return true
+}
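+
+// Worked example of the alignment above, with best_indent = 2: a node at
+// indent 0 nests to 2*((0+2)/2) = 2, and the next level to 2*((2+2)/2) = 4,
+// keeping block indentation on multiples of best_indent.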
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ switch emitter.state {
+ default:
+ case yaml_EMIT_STREAM_START_STATE:
+ return yaml_emitter_emit_stream_start(emitter, event)
+
+ case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, true)
+
+ case yaml_EMIT_DOCUMENT_START_STATE:
+ return yaml_emitter_emit_document_start(emitter, event, false)
+
+ case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+ return yaml_emitter_emit_document_content(emitter, event)
+
+ case yaml_EMIT_DOCUMENT_END_STATE:
+ return yaml_emitter_emit_document_end(emitter, event)
+
+ case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false)
+
+ case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true)
+
+ case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false)
+
+ case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+ return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+ return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+ case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+ case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+ case yaml_EMIT_END_STATE:
+ return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+ }
+ panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_STREAM_START_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = event.encoding
+ if emitter.encoding == yaml_ANY_ENCODING {
+ emitter.encoding = yaml_UTF8_ENCODING
+ }
+ }
+ if emitter.best_indent < 2 || emitter.best_indent > 9 {
+ emitter.best_indent = 2
+ }
+ if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+ emitter.best_width = 80
+ }
+ if emitter.best_width < 0 {
+ emitter.best_width = 1<<31 - 1
+ }
+ if emitter.line_break == yaml_ANY_BREAK {
+ emitter.line_break = yaml_LN_BREAK
+ }
+
+ emitter.indent = -1
+ emitter.line = 0
+ emitter.column = 0
+ emitter.whitespace = true
+ emitter.indention = true
+ emitter.space_above = true
+ emitter.foot_indent = -1
+
+ if emitter.encoding != yaml_UTF8_ENCODING {
+ if !yaml_emitter_write_bom(emitter) {
+ return false
+ }
+ }
+ emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+ if event.typ == yaml_DOCUMENT_START_EVENT {
+
+ if event.version_directive != nil {
+ if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+ return false
+ }
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+ return false
+ }
+ }
+
+ for i := 0; i < len(default_tag_directives); i++ {
+ tag_directive := &default_tag_directives[i]
+ if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+ return false
+ }
+ }
+
+ implicit := event.implicit
+ if !first || emitter.canonical {
+ implicit = false
+ }
+
+ if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if event.version_directive != nil {
+ implicit = false
+ if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if len(event.tag_directives) > 0 {
+ implicit = false
+ for i := 0; i < len(event.tag_directives); i++ {
+ tag_directive := &event.tag_directives[i]
+ if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if yaml_emitter_check_empty_document(emitter) {
+ implicit = false
+ }
+ if !implicit {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+ return false
+ }
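+ // [Go] The "|| true" below is intentional: always break the line after
+ // "---" so the document root begins on its own line.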
+ if emitter.canonical || true {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ }
+
+ if len(emitter.head_comment) > 0 {
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+ return true
+ }
+
+ if event.typ == yaml_STREAM_END_EVENT {
+ if emitter.open_ended {
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_END_STATE
+ return true
+ }
+
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_emit_node(emitter, event, true, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if event.typ != yaml_DOCUMENT_END_EVENT {
+ return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+ }
+ // [Go] Force document foot separation.
+ emitter.foot_indent = 0
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.foot_indent = -1
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !event.implicit {
+ // [Go] Allocate the slice elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_flush(emitter) {
+ return false
+ }
+ emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+ emitter.tag_directives = emitter.tag_directives[:0]
+ return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ if emitter.canonical && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.column == 0 || emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool {
+ if first {
+ if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ emitter.flow_level++
+ }
+
+ if event.typ == yaml_MAPPING_END_EVENT {
+ if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ emitter.flow_level--
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ if emitter.canonical && !first {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+
+ if !first && !trail {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+
+ if emitter.column == 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+
+ if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if emitter.canonical || emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+ return false
+ }
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
+ } else {
+ emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+ }
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if event.typ == yaml_SEQUENCE_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+ if first {
+ if !yaml_emitter_increase_indent(emitter, false, false) {
+ return false
+ }
+ }
+ if !yaml_emitter_process_head_comment(emitter) {
+ return false
+ }
+ if event.typ == yaml_MAPPING_END_EVENT {
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if len(emitter.line_comment) > 0 {
+ // [Go] A line comment was provided for the key. That's unusual as the
+ // scanner associates line comments with the value. Either way,
+ // save the line comment and render it appropriately later.
+ emitter.key_line_comment = emitter.line_comment
+ emitter.line_comment = nil
+ }
+ if yaml_emitter_check_simple_key(emitter) {
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+ return false
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+ if simple {
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+ return false
+ }
+ } else {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+ return false
+ }
+ }
+ if len(emitter.key_line_comment) > 0 {
+ // [Go] Line comments are generally associated with the value, but when there's
+ // no value on the same line as a mapping key they end up attached to the
+ // key itself.
+ if event.typ == yaml_SCALAR_EVENT {
+ if len(emitter.line_comment) == 0 {
+ // A scalar is coming and it has no line comments by itself yet,
+ // so just let it handle the line comment as usual. If it has a
+ // line comment, we can't have both so the one from the key is lost.
+ emitter.line_comment = emitter.key_line_comment
+ emitter.key_line_comment = nil
+ }
+ } else if event.sequence_style() != yaml_FLOW_SEQUENCE_STYLE && (event.typ == yaml_MAPPING_START_EVENT || event.typ == yaml_SEQUENCE_START_EVENT) {
+ // An indented block follows, so write the comment right now.
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment
+ }
+ }
+ emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+ if !yaml_emitter_emit_node(emitter, event, false, false, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_foot_comment(emitter) {
+ return false
+ }
+ return true
+}
+
+func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+ root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+ emitter.root_context = root
+ emitter.sequence_context = sequence
+ emitter.mapping_context = mapping
+ emitter.simple_key_context = simple_key
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ return yaml_emitter_emit_alias(emitter, event)
+ case yaml_SCALAR_EVENT:
+ return yaml_emitter_emit_scalar(emitter, event)
+ case yaml_SEQUENCE_START_EVENT:
+ return yaml_emitter_emit_sequence_start(emitter, event)
+ case yaml_MAPPING_START_EVENT:
+ return yaml_emitter_emit_mapping_start(emitter, event)
+ default:
+ return yaml_emitter_set_emitter_error(emitter,
+ fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
+ }
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_select_scalar_style(emitter, event) {
+ return false
+ }
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if !yaml_emitter_increase_indent(emitter, true, false) {
+ return false
+ }
+ if !yaml_emitter_process_scalar(emitter) {
+ return false
+ }
+ emitter.indent = emitter.indents[len(emitter.indents)-1]
+ emitter.indents = emitter.indents[:len(emitter.indents)-1]
+ emitter.state = emitter.states[len(emitter.states)-1]
+ emitter.states = emitter.states[:len(emitter.states)-1]
+ return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+ yaml_emitter_check_empty_sequence(emitter) {
+ emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ }
+ return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+ if !yaml_emitter_process_anchor(emitter) {
+ return false
+ }
+ if !yaml_emitter_process_tag(emitter) {
+ return false
+ }
+ if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+ yaml_emitter_check_empty_mapping(emitter) {
+ emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ } else {
+ emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ }
+ return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+ return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+ if len(emitter.events)-emitter.events_head < 2 {
+ return false
+ }
+ return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+ emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+ length := 0
+ switch emitter.events[emitter.events_head].typ {
+ case yaml_ALIAS_EVENT:
+ length += len(emitter.anchor_data.anchor)
+ case yaml_SCALAR_EVENT:
+ if emitter.scalar_data.multiline {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix) +
+ len(emitter.scalar_data.value)
+ case yaml_SEQUENCE_START_EVENT:
+ if !yaml_emitter_check_empty_sequence(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ case yaml_MAPPING_START_EVENT:
+ if !yaml_emitter_check_empty_mapping(emitter) {
+ return false
+ }
+ length += len(emitter.anchor_data.anchor) +
+ len(emitter.tag_data.handle) +
+ len(emitter.tag_data.suffix)
+ default:
+ return false
+ }
+ return length <= 128
+}
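+
+// Illustration (editor's note, not from upstream): given the 128-byte limit
+// above, a short single-line key is emitted in simple form, while an
+// oversized or multiline key falls back to the explicit form written with
+// the '?' indicator in yaml_emitter_emit_block_mapping_key above:
+//
+//	short: value
+//	? <key longer than 128 bytes>
+//	: value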
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+ if no_tag && !event.implicit && !event.quoted_implicit {
+ return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+ }
+
+ style := event.scalar_style()
+ if style == yaml_ANY_SCALAR_STYLE {
+ style = yaml_PLAIN_SCALAR_STYLE
+ }
+ if emitter.canonical {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ if emitter.simple_key_context && emitter.scalar_data.multiline {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ if style == yaml_PLAIN_SCALAR_STYLE {
+ if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+ emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ if no_tag && !event.implicit {
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+ if !emitter.scalar_data.single_quoted_allowed {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+ if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+ if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ }
+
+ if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+ emitter.tag_data.handle = []byte{'!'}
+ }
+ emitter.scalar_data.style = style
+ return true
+}
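+
+// Illustration (editor's note): the checks above demote styles rather than
+// fail. For example, a multiline value in a simple key context is forced to
+// double quotes, so a map key containing a newline renders as:
+//
+//	"line one\nline two": 1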
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+ if emitter.anchor_data.anchor == nil {
+ return true
+ }
+ c := []byte{'&'}
+ if emitter.anchor_data.alias {
+ c[0] = '*'
+ }
+ if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+ return false
+ }
+ return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+ if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+ return true
+ }
+ if len(emitter.tag_data.handle) > 0 {
+ if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+ return false
+ }
+ if len(emitter.tag_data.suffix) > 0 {
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ }
+ } else {
+ // [Go] Allocate these slices elsewhere.
+ if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+ return false
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+ return false
+ }
+ }
+ return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+ switch emitter.scalar_data.style {
+ case yaml_PLAIN_SCALAR_STYLE:
+ return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+ return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+ case yaml_LITERAL_SCALAR_STYLE:
+ return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+ case yaml_FOLDED_SCALAR_STYLE:
+ return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+ }
+ panic("unknown scalar style")
+}
+
+// Write a head comment.
+func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.tail_comment) > 0 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.tail_comment) {
+ return false
+ }
+ emitter.tail_comment = emitter.tail_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ }
+
+ if len(emitter.head_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.head_comment) {
+ return false
+ }
+ emitter.head_comment = emitter.head_comment[:0]
+ return true
+}
+
+// Write a line comment.
+func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.line_comment) == 0 {
+ return true
+ }
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.line_comment) {
+ return false
+ }
+ emitter.line_comment = emitter.line_comment[:0]
+ return true
+}
+
+// Write a foot comment.
+func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool {
+ if len(emitter.foot_comment) == 0 {
+ return true
+ }
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !yaml_emitter_write_comment(emitter, emitter.foot_comment) {
+ return false
+ }
+ emitter.foot_comment = emitter.foot_comment[:0]
+ emitter.foot_indent = emitter.indent
+ if emitter.foot_indent < 0 {
+ emitter.foot_indent = 0
+ }
+ return true
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+ if version_directive.major != 1 || version_directive.minor != 1 {
+ return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+ }
+ return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+ handle := tag_directive.handle
+ prefix := tag_directive.prefix
+ if len(handle) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+ }
+ if handle[0] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+ }
+ if handle[len(handle)-1] != '!' {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+ }
+ for i := 1; i < len(handle)-1; i += width(handle[i]) {
+ if !is_alpha(handle, i) {
+ return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+ }
+ }
+ if len(prefix) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+ }
+ return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+ if len(anchor) == 0 {
+ problem := "anchor value must not be empty"
+ if alias {
+ problem = "alias value must not be empty"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ for i := 0; i < len(anchor); i += width(anchor[i]) {
+ if !is_alpha(anchor, i) {
+ problem := "anchor value must contain alphanumerical characters only"
+ if alias {
+ problem = "alias value must contain alphanumerical characters only"
+ }
+ return yaml_emitter_set_emitter_error(emitter, problem)
+ }
+ }
+ emitter.anchor_data.anchor = anchor
+ emitter.anchor_data.alias = alias
+ return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+ if len(tag) == 0 {
+ return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+ }
+ for i := 0; i < len(emitter.tag_directives); i++ {
+ tag_directive := &emitter.tag_directives[i]
+ if bytes.HasPrefix(tag, tag_directive.prefix) {
+ emitter.tag_data.handle = tag_directive.handle
+ emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+ return true
+ }
+ }
+ emitter.tag_data.suffix = tag
+ return true
+}
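+
+// For example (editor's note), given a %TAG directive mapping the handle
+// !e! to the prefix tag:example.com,2000:, the loop above splits the tag
+// tag:example.com,2000:foo into handle !e! and suffix foo, which is later
+// written as !e!foo.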
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ var (
+ block_indicators = false
+ flow_indicators = false
+ line_breaks = false
+ special_characters = false
+ tab_characters = false
+
+ leading_space = false
+ leading_break = false
+ trailing_space = false
+ trailing_break = false
+ break_space = false
+ space_break = false
+
+ preceded_by_whitespace = false
+ followed_by_whitespace = false
+ previous_space = false
+ previous_break = false
+ )
+
+ emitter.scalar_data.value = value
+
+ if len(value) == 0 {
+ emitter.scalar_data.multiline = false
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = false
+ return true
+ }
+
+ if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+ block_indicators = true
+ flow_indicators = true
+ }
+
+ preceded_by_whitespace = true
+ for i, w := 0, 0; i < len(value); i += w {
+ w = width(value[i])
+ followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+ if i == 0 {
+ switch value[i] {
+ case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+ flow_indicators = true
+ block_indicators = true
+ case '?', ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '-':
+ if followed_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ } else {
+ switch value[i] {
+ case ',', '?', '[', ']', '{', '}':
+ flow_indicators = true
+ case ':':
+ flow_indicators = true
+ if followed_by_whitespace {
+ block_indicators = true
+ }
+ case '#':
+ if preceded_by_whitespace {
+ flow_indicators = true
+ block_indicators = true
+ }
+ }
+ }
+
+ if value[i] == '\t' {
+ tab_characters = true
+ } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+ special_characters = true
+ }
+ if is_space(value, i) {
+ if i == 0 {
+ leading_space = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_space = true
+ }
+ if previous_break {
+ break_space = true
+ }
+ previous_space = true
+ previous_break = false
+ } else if is_break(value, i) {
+ line_breaks = true
+ if i == 0 {
+ leading_break = true
+ }
+ if i+width(value[i]) == len(value) {
+ trailing_break = true
+ }
+ if previous_space {
+ space_break = true
+ }
+ previous_space = false
+ previous_break = true
+ } else {
+ previous_space = false
+ previous_break = false
+ }
+
+		// [Go]: Why the 'z' variant? It can't be the end of the string, as that's the loop condition.
+ preceded_by_whitespace = is_blankz(value, i)
+ }
+
+ emitter.scalar_data.multiline = line_breaks
+ emitter.scalar_data.flow_plain_allowed = true
+ emitter.scalar_data.block_plain_allowed = true
+ emitter.scalar_data.single_quoted_allowed = true
+ emitter.scalar_data.block_allowed = true
+
+ if leading_space || leading_break || trailing_space || trailing_break {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if trailing_space {
+ emitter.scalar_data.block_allowed = false
+ }
+ if break_space {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || tab_characters || special_characters {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ emitter.scalar_data.single_quoted_allowed = false
+ }
+ if space_break || special_characters {
+ emitter.scalar_data.block_allowed = false
+ }
+ if line_breaks {
+ emitter.scalar_data.flow_plain_allowed = false
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ if flow_indicators {
+ emitter.scalar_data.flow_plain_allowed = false
+ }
+ if block_indicators {
+ emitter.scalar_data.block_plain_allowed = false
+ }
+ return true
+}
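+
+// Illustration (editor's note): for the value " padded " the loop above
+// records both leading and trailing spaces, so the plain styles and the
+// block styles are all disallowed, leaving the quoted styles as the only
+// acceptable presentations.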
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+ emitter.anchor_data.anchor = nil
+ emitter.tag_data.handle = nil
+ emitter.tag_data.suffix = nil
+ emitter.scalar_data.value = nil
+
+ if len(event.head_comment) > 0 {
+ emitter.head_comment = event.head_comment
+ }
+ if len(event.line_comment) > 0 {
+ emitter.line_comment = event.line_comment
+ }
+ if len(event.foot_comment) > 0 {
+ emitter.foot_comment = event.foot_comment
+ }
+ if len(event.tail_comment) > 0 {
+ emitter.tail_comment = event.tail_comment
+ }
+
+ switch event.typ {
+ case yaml_ALIAS_EVENT:
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+ return false
+ }
+
+ case yaml_SCALAR_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ if !yaml_emitter_analyze_scalar(emitter, event.value) {
+ return false
+ }
+
+ case yaml_SEQUENCE_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+
+ case yaml_MAPPING_START_EVENT:
+ if len(event.anchor) > 0 {
+ if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+ return false
+ }
+ }
+ if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+ if !yaml_emitter_analyze_tag(emitter, event.tag) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
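+	// Append the three-byte UTF-8 encoding of U+FEFF (the byte order mark).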
+ pos := emitter.buffer_pos
+ emitter.buffer[pos+0] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+ if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if emitter.foot_indent == indent {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ emitter.whitespace = true
+ //emitter.indention = true
+ emitter.space_above = false
+ emitter.foot_indent = -1
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, indicator) {
+ return false
+ }
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ if !write_all(emitter, value) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+ for i := 0; i < len(value); {
+ var must_write bool
+ switch value[i] {
+ case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+ must_write = true
+ default:
+ must_write = is_alpha(value, i)
+ }
+ if must_write {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ } else {
+ w := width(value[i])
+ for k := 0; k < w; k++ {
+ octet := value[i]
+ i++
+ if !put(emitter, '%') {
+ return false
+ }
+
+ c := octet >> 4
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+
+ c = octet & 0x0f
+ if c < 10 {
+ c += '0'
+ } else {
+ c += 'A' - 10
+ }
+ if !put(emitter, c) {
+ return false
+ }
+ }
+ }
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
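+
+// For example (editor's note), a suffix byte that is neither alphanumeric
+// nor one of the URI mark characters above is percent-encoded per octet,
+// so the suffix "a b" is written as a%20b.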
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ if len(value) > 0 && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+
+ if len(value) > 0 {
+ emitter.whitespace = false
+ }
+ emitter.indention = false
+ if emitter.root_context {
+ emitter.open_ended = true
+ }
+
+ return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+ return false
+ }
+
+ spaces := false
+ breaks := false
+ for i := 0; i < len(value); {
+ if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ spaces = true
+ } else if is_break(value, i) {
+ if !breaks && value[i] == '\n' {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if value[i] == '\'' {
+ if !put(emitter, '\'') {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ spaces = false
+ breaks = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+ spaces := false
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+ return false
+ }
+
+ for i := 0; i < len(value); {
+ if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+ is_bom(value, i) || is_break(value, i) ||
+ value[i] == '"' || value[i] == '\\' {
+
+ octet := value[i]
+
+ var w int
+ var v rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, v = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, v = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, v = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, v = 4, rune(octet&0x07)
+ }
+ for k := 1; k < w; k++ {
+ octet = value[i+k]
+ v = (v << 6) + (rune(octet) & 0x3F)
+ }
+ i += w
+
+ if !put(emitter, '\\') {
+ return false
+ }
+
+ var ok bool
+ switch v {
+ case 0x00:
+ ok = put(emitter, '0')
+ case 0x07:
+ ok = put(emitter, 'a')
+ case 0x08:
+ ok = put(emitter, 'b')
+ case 0x09:
+ ok = put(emitter, 't')
+ case 0x0A:
+ ok = put(emitter, 'n')
+ case 0x0b:
+ ok = put(emitter, 'v')
+ case 0x0c:
+ ok = put(emitter, 'f')
+ case 0x0d:
+ ok = put(emitter, 'r')
+ case 0x1b:
+ ok = put(emitter, 'e')
+ case 0x22:
+ ok = put(emitter, '"')
+ case 0x5c:
+ ok = put(emitter, '\\')
+ case 0x85:
+ ok = put(emitter, 'N')
+ case 0xA0:
+ ok = put(emitter, '_')
+ case 0x2028:
+ ok = put(emitter, 'L')
+ case 0x2029:
+ ok = put(emitter, 'P')
+ default:
+ if v <= 0xFF {
+ ok = put(emitter, 'x')
+ w = 2
+ } else if v <= 0xFFFF {
+ ok = put(emitter, 'u')
+ w = 4
+ } else {
+ ok = put(emitter, 'U')
+ w = 8
+ }
+ for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+ digit := byte((v >> uint(k)) & 0x0F)
+ if digit < 10 {
+ ok = put(emitter, digit+'0')
+ } else {
+ ok = put(emitter, digit+'A'-10)
+ }
+ }
+ }
+ if !ok {
+ return false
+ }
+ spaces = false
+ } else if is_space(value, i) {
+ if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if is_space(value, i+1) {
+ if !put(emitter, '\\') {
+ return false
+ }
+ }
+ i += width(value[i])
+ } else if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = true
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ spaces = false
+ }
+ }
+ if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+ return false
+ }
+ emitter.whitespace = false
+ emitter.indention = false
+ return true
+}
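+
+// Illustration (editor's note): in double-quoted style a tab is written as
+// the short escape \t and U+2028 as \L, while characters without a short
+// escape fall back to \xXX, \uXXXX, or \UXXXXXXXX depending on their value.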
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+ if is_space(value, 0) || is_break(value, 0) {
+ indent_hint := []byte{'0' + byte(emitter.best_indent)}
+ if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+ return false
+ }
+ }
+
+ emitter.open_ended = false
+
+ var chomp_hint [1]byte
+ if len(value) == 0 {
+ chomp_hint[0] = '-'
+ } else {
+ i := len(value) - 1
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if !is_break(value, i) {
+ chomp_hint[0] = '-'
+ } else if i == 0 {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ } else {
+ i--
+ for value[i]&0xC0 == 0x80 {
+ i--
+ }
+ if is_break(value, i) {
+ chomp_hint[0] = '+'
+ emitter.open_ended = true
+ }
+ }
+ }
+ if chomp_hint[0] != 0 {
+ if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+ return false
+ }
+ }
+ return true
+}
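+
+// Illustration (editor's note): the hints above produce block scalar
+// headers such as:
+//
+//	|-	value has no trailing line break (strip)
+//	|	value ends in exactly one break (clip, no hint)
+//	|+	value ends in two or more breaks, or is all breaks (keep)
+//	|2	value starts with a space or break, so the indent is explicit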
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+ //emitter.indention = true
+ emitter.whitespace = true
+ breaks := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ }
+ if !write(emitter, value, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+
+ return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+ if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+ return false
+ }
+ if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+ return false
+ }
+ if !yaml_emitter_process_line_comment(emitter) {
+ return false
+ }
+
+ //emitter.indention = true
+ emitter.whitespace = true
+
+ breaks := true
+ leading_spaces := true
+ for i := 0; i < len(value); {
+ if is_break(value, i) {
+ if !breaks && !leading_spaces && value[i] == '\n' {
+ k := 0
+ for is_break(value, k) {
+ k += width(value[k])
+ }
+ if !is_blankz(value, k) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+ }
+ if !write_break(emitter, value, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ } else {
+ if breaks {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ leading_spaces = is_blank(value, i)
+ }
+ if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+ if !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ i += width(value[i])
+ } else {
+ if !write(emitter, value, &i) {
+ return false
+ }
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ return true
+}
+
+func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool {
+ breaks := false
+ pound := false
+ for i := 0; i < len(comment); {
+ if is_break(comment, i) {
+ if !write_break(emitter, comment, &i) {
+ return false
+ }
+ //emitter.indention = true
+ breaks = true
+ pound = false
+ } else {
+ if breaks && !yaml_emitter_write_indent(emitter) {
+ return false
+ }
+ if !pound {
+ if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) {
+ return false
+ }
+ pound = true
+ }
+ if !write(emitter, comment, &i) {
+ return false
+ }
+ emitter.indention = false
+ breaks = false
+ }
+ }
+ if !breaks && !put_break(emitter) {
+ return false
+ }
+
+ emitter.whitespace = true
+ //emitter.indention = true
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go
new file mode 100644
index 0000000..de9e72a
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/encode.go
@@ -0,0 +1,577 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding"
+ "fmt"
+ "io"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+)
+
+type encoder struct {
+ emitter yaml_emitter_t
+ event yaml_event_t
+ out []byte
+ flow bool
+ indent int
+ doneInit bool
+}
+
+func newEncoder() *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_string(&e.emitter, &e.out)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
+
+func newEncoderWithWriter(w io.Writer) *encoder {
+ e := &encoder{}
+ yaml_emitter_initialize(&e.emitter)
+ yaml_emitter_set_output_writer(&e.emitter, w)
+ yaml_emitter_set_unicode(&e.emitter, true)
+ return e
+}
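+
+// A minimal sketch (editor's illustration; the exact wiring lives in the
+// package-level Marshal/Encoder code, which is not part of this diff) of
+// how these constructors are driven:
+//
+//	e := newEncoder()
+//	defer e.destroy()
+//	e.marshalDoc("", reflect.ValueOf(v)) // calls e.init() internally
+//	e.finish()
+//	out := e.out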
+
+func (e *encoder) init() {
+ if e.doneInit {
+ return
+ }
+ if e.indent == 0 {
+ e.indent = 4
+ }
+ e.emitter.best_indent = e.indent
+ yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
+ e.emit()
+ e.doneInit = true
+}
+
+func (e *encoder) finish() {
+ e.emitter.open_ended = false
+ yaml_stream_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) destroy() {
+ yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+ // This will internally delete the e.event value.
+ e.must(yaml_emitter_emit(&e.emitter, &e.event))
+}
+
+func (e *encoder) must(ok bool) {
+ if !ok {
+ msg := e.emitter.problem
+ if msg == "" {
+ msg = "unknown problem generating YAML content"
+ }
+ failf("%s", msg)
+ }
+}
+
+func (e *encoder) marshalDoc(tag string, in reflect.Value) {
+ e.init()
+ var node *Node
+ if in.IsValid() {
+ node, _ = in.Interface().(*Node)
+ }
+ if node != nil && node.Kind == DocumentNode {
+ e.nodev(in)
+ } else {
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.emit()
+ e.marshal(tag, in)
+ yaml_document_end_event_initialize(&e.event, true)
+ e.emit()
+ }
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+ tag = shortTag(tag)
+ if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
+ e.nilv()
+ return
+ }
+ iface := in.Interface()
+ switch value := iface.(type) {
+ case *Node:
+ e.nodev(in)
+ return
+ case Node:
+ if !in.CanAddr() {
+ var n = reflect.New(in.Type()).Elem()
+ n.Set(in)
+ in = n
+ }
+ e.nodev(in.Addr())
+ return
+ case time.Time:
+ e.timev(tag, in)
+ return
+ case *time.Time:
+ e.timev(tag, in.Elem())
+ return
+ case time.Duration:
+ e.stringv(tag, reflect.ValueOf(value.String()))
+ return
+ case Marshaler:
+ v, err := value.MarshalYAML()
+ if err != nil {
+ fail(err)
+ }
+ if v == nil {
+ e.nilv()
+ return
+ }
+ e.marshal(tag, reflect.ValueOf(v))
+ return
+ case encoding.TextMarshaler:
+ text, err := value.MarshalText()
+ if err != nil {
+ fail(err)
+ }
+ in = reflect.ValueOf(string(text))
+ case nil:
+ e.nilv()
+ return
+ }
+ switch in.Kind() {
+ case reflect.Interface:
+ e.marshal(tag, in.Elem())
+ case reflect.Map:
+ e.mapv(tag, in)
+ case reflect.Ptr:
+ e.marshal(tag, in.Elem())
+ case reflect.Struct:
+ e.structv(tag, in)
+ case reflect.Slice, reflect.Array:
+ e.slicev(tag, in)
+ case reflect.String:
+ e.stringv(tag, in)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ e.intv(tag, in)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ e.uintv(tag, in)
+ case reflect.Float32, reflect.Float64:
+ e.floatv(tag, in)
+ case reflect.Bool:
+ e.boolv(tag, in)
+ default:
+ panic("cannot marshal type: " + in.Type().String())
+ }
+}
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+ e.mappingv(tag, func() {
+ keys := keyList(in.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k)
+ e.marshal("", in.MapIndex(k))
+ }
+ })
+}
+
+func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) {
+ for _, num := range index {
+ for {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ continue
+ }
+ break
+ }
+ v = v.Field(num)
+ }
+ return v
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+ sinfo, err := getStructInfo(in.Type())
+ if err != nil {
+ panic(err)
+ }
+ e.mappingv(tag, func() {
+ for _, info := range sinfo.FieldsList {
+ var value reflect.Value
+ if info.Inline == nil {
+ value = in.Field(info.Num)
+ } else {
+ value = e.fieldByIndex(in, info.Inline)
+ if !value.IsValid() {
+ continue
+ }
+ }
+ if info.OmitEmpty && isZero(value) {
+ continue
+ }
+ e.marshal("", reflect.ValueOf(info.Key))
+ e.flow = info.Flow
+ e.marshal("", value)
+ }
+ if sinfo.InlineMap >= 0 {
+ m := in.Field(sinfo.InlineMap)
+ if m.Len() > 0 {
+ e.flow = false
+ keys := keyList(m.MapKeys())
+ sort.Sort(keys)
+ for _, k := range keys {
+ if _, found := sinfo.FieldsMap[k.String()]; found {
+ panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String()))
+ }
+ e.marshal("", k)
+ e.flow = false
+ e.marshal("", m.MapIndex(k))
+ }
+ }
+ }
+ })
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+ implicit := tag == ""
+ style := yaml_BLOCK_MAPPING_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
+ e.emit()
+ f()
+ yaml_mapping_end_event_initialize(&e.event)
+ e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+ implicit := tag == ""
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if e.flow {
+ e.flow = false
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+ e.emit()
+ n := in.Len()
+ for i := 0; i < n; i++ {
+ e.marshal("", in.Index(i))
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such strings should be marshalled quoted
+// for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+ // Fast path.
+ if s == "" {
+ return false
+ }
+ c := s[0]
+ if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+ return false
+ }
+ // Do the full match.
+ return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
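+
+// For example, the unquoted YAML 1.1 scalar 1:20 denotes the sexagesimal
+// value 80, so strings matching this pattern are emitted quoted.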
+
+// isOldBool returns whether s is bool notation as defined in YAML 1.1.
+//
+// We continue to force strings that YAML 1.1 would interpret as booleans to be
+// rendered as quoted strings so that the marshalled output is valid for
+// YAML 1.1 parsing.
+func isOldBool(s string) (result bool) {
+ switch s {
+ case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON",
+ "n", "N", "no", "No", "NO", "off", "Off", "OFF":
+ return true
+ default:
+ return false
+ }
+}
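+
+// For example, marshalling the Go string "yes" as a plain scalar would read
+// back as a boolean under YAML 1.1 rules, so stringv below emits it quoted.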
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+ var style yaml_scalar_style_t
+ s := in.String()
+ canUsePlain := true
+ switch {
+ case !utf8.ValidString(s):
+ if tag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if tag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ s = encodeBase64(s)
+ case tag == "":
+ // Check to see if it would resolve to a specific
+ // tag when encoded unquoted. If it doesn't,
+ // there's no need to quote it.
+ rtag, _ := resolve("", s)
+ canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s))
+ }
+	// Note: it's possible for user code to emit invalid YAML
+	// if it explicitly specifies a tag and a string containing
+	// text that's incompatible with that tag.
+ switch {
+ case strings.Contains(s, "\n"):
+ if e.flow {
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ } else {
+ style = yaml_LITERAL_SCALAR_STYLE
+ }
+ case canUsePlain:
+ style = yaml_PLAIN_SCALAR_STYLE
+ default:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ e.emitScalar(s, "", tag, style, nil, nil, nil, nil)
+}
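+
+// Illustration (editor's note) of the style selection above: "hello" stays
+// plain; "123" resolves to an int tag when unquoted, so it is emitted
+// double-quoted; a string containing a newline becomes a literal block
+// scalar (or double-quoted in flow context).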
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+ var s string
+ if in.Bool() {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+ s := strconv.FormatInt(in.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+ s := strconv.FormatUint(in.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) timev(tag string, in reflect.Value) {
+ t := in.Interface().(time.Time)
+ s := t.Format(time.RFC3339Nano)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+ // Issue #352: When formatting, use the precision of the underlying value
+ precision := 64
+ if in.Kind() == reflect.Float32 {
+ precision = 32
+ }
+
+ s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) nilv() {
+ e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) {
+	// TODO Kill this function. Replace all initialize calls by their underlying Go literals.
+ implicit := tag == ""
+ if !implicit {
+ tag = longTag(tag)
+ }
+ e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+ e.event.head_comment = head
+ e.event.line_comment = line
+ e.event.foot_comment = foot
+ e.event.tail_comment = tail
+ e.emit()
+}
+
+func (e *encoder) nodev(in reflect.Value) {
+ e.node(in.Interface().(*Node), "")
+}
+
+func (e *encoder) node(node *Node, tail string) {
+ // Zero nodes behave as nil.
+ if node.Kind == 0 && node.IsZero() {
+ e.nilv()
+ return
+ }
+
+ // If the tag was not explicitly requested, and dropping it won't change the
+ // implicit tag of the value, don't include it in the presentation.
+ var tag = node.Tag
+ var stag = shortTag(tag)
+ var forceQuoting bool
+ if tag != "" && node.Style&TaggedStyle == 0 {
+ if node.Kind == ScalarNode {
+ if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 {
+ tag = ""
+ } else {
+ rtag, _ := resolve("", node.Value)
+ if rtag == stag {
+ tag = ""
+ } else if stag == strTag {
+ tag = ""
+ forceQuoting = true
+ }
+ }
+ } else {
+ var rtag string
+ switch node.Kind {
+ case MappingNode:
+ rtag = mapTag
+ case SequenceNode:
+ rtag = seqTag
+ }
+ if rtag == stag {
+ tag = ""
+ }
+ }
+ }
+
+ switch node.Kind {
+ case DocumentNode:
+ yaml_document_start_event_initialize(&e.event, nil, nil, true)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ yaml_document_end_event_initialize(&e.event, true)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case SequenceNode:
+ style := yaml_BLOCK_SEQUENCE_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_SEQUENCE_STYLE
+ }
+ e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+ for _, node := range node.Content {
+ e.node(node, "")
+ }
+ e.must(yaml_sequence_end_event_initialize(&e.event))
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case MappingNode:
+ style := yaml_BLOCK_MAPPING_STYLE
+ if node.Style&FlowStyle != 0 {
+ style = yaml_FLOW_MAPPING_STYLE
+ }
+ yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)
+ e.event.tail_comment = []byte(tail)
+ e.event.head_comment = []byte(node.HeadComment)
+ e.emit()
+
+		// The tail logic below moves the foot comment of prior keys to the following key,
+		// since the value for each key may be a nested structure and the foot needs to be
+		// processed only after the entirety of the value is streamed. The last tail is
+		// processed with the mapping end event.
+ var tail string
+ for i := 0; i+1 < len(node.Content); i += 2 {
+ k := node.Content[i]
+ foot := k.FootComment
+ if foot != "" {
+ kopy := *k
+ kopy.FootComment = ""
+ k = &kopy
+ }
+ e.node(k, tail)
+ tail = foot
+
+ v := node.Content[i+1]
+ e.node(v, "")
+ }
+
+ yaml_mapping_end_event_initialize(&e.event)
+ e.event.tail_comment = []byte(tail)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case AliasNode:
+ yaml_alias_event_initialize(&e.event, []byte(node.Value))
+ e.event.head_comment = []byte(node.HeadComment)
+ e.event.line_comment = []byte(node.LineComment)
+ e.event.foot_comment = []byte(node.FootComment)
+ e.emit()
+
+ case ScalarNode:
+ value := node.Value
+ if !utf8.ValidString(value) {
+ if stag == binaryTag {
+ failf("explicitly tagged !!binary data must be base64-encoded")
+ }
+ if stag != "" {
+ failf("cannot marshal invalid UTF-8 data as %s", stag)
+ }
+ // It can't be encoded directly as YAML so use a binary tag
+ // and encode it as base64.
+ tag = binaryTag
+ value = encodeBase64(value)
+ }
+
+ style := yaml_PLAIN_SCALAR_STYLE
+ switch {
+ case node.Style&DoubleQuotedStyle != 0:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ case node.Style&SingleQuotedStyle != 0:
+ style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+ case node.Style&LiteralStyle != 0:
+ style = yaml_LITERAL_SCALAR_STYLE
+ case node.Style&FoldedStyle != 0:
+ style = yaml_FOLDED_SCALAR_STYLE
+ case strings.Contains(value, "\n"):
+ style = yaml_LITERAL_SCALAR_STYLE
+ case forceQuoting:
+ style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+
+ e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail))
+ default:
+ failf("cannot encode node with unknown kind %d", node.Kind)
+ }
+}
diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod
new file mode 100644
index 0000000..f407ea3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/go.mod
@@ -0,0 +1,5 @@
+module "gopkg.in/yaml.v3"
+
+require (
+ "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
+)
diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go
new file mode 100644
index 0000000..ac66fcc
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/parserc.go
@@ -0,0 +1,1249 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document ::= block_node DOCUMENT-END*
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// | properties (block_content | indentless_block_sequence)?
+// | block_content
+// | indentless_block_sequence
+// block_node ::= ALIAS
+// | properties block_content?
+// | block_content
+// flow_node ::= ALIAS
+// | properties flow_content?
+// | flow_content
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content ::= block_collection | flow_collection | SCALAR
+// flow_content ::= flow_collection | SCALAR
+// block_collection ::= block_sequence | block_mapping
+// flow_collection ::= flow_sequence | flow_mapping
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// block_mapping ::= BLOCK-MAPPING_START
+// ((KEY block_node_or_indentless_sequence?)?
+// (VALUE block_node_or_indentless_sequence?)?)*
+// BLOCK-END
+// flow_sequence ::= FLOW-SEQUENCE-START
+// (flow_sequence_entry FLOW-ENTRY)*
+// flow_sequence_entry?
+// FLOW-SEQUENCE-END
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping ::= FLOW-MAPPING-START
+// (flow_mapping_entry FLOW-ENTRY)*
+// flow_mapping_entry?
+// FLOW-MAPPING-END
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
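+// For example, under the grammar above the stream
+//
+//	a: 1
+//	---
+//	b: 2
+//
+// parses as one implicit document (no leading ---) followed by one
+// explicit document.
+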
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+ if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+ token := &parser.tokens[parser.tokens_head]
+ yaml_parser_unfold_comments(parser, token)
+ return token
+ }
+ return nil
+}
+
+// yaml_parser_unfold_comments walks through the comments queue and joins all
+// comments at or before the position of the provided token into the respective
+// top-level comment slices in the parser.
+func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) {
+ for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index {
+ comment := &parser.comments[parser.comments_head]
+ if len(comment.head) > 0 {
+ if token.typ == yaml_BLOCK_END_TOKEN {
+				// No heads on ends, so keep comment.head for a follow-up token.
+ break
+ }
+ if len(parser.head_comment) > 0 {
+ parser.head_comment = append(parser.head_comment, '\n')
+ }
+ parser.head_comment = append(parser.head_comment, comment.head...)
+ }
+ if len(comment.foot) > 0 {
+ if len(parser.foot_comment) > 0 {
+ parser.foot_comment = append(parser.foot_comment, '\n')
+ }
+ parser.foot_comment = append(parser.foot_comment, comment.foot...)
+ }
+ if len(comment.line) > 0 {
+ if len(parser.line_comment) > 0 {
+ parser.line_comment = append(parser.line_comment, '\n')
+ }
+ parser.line_comment = append(parser.line_comment, comment.line...)
+ }
+ *comment = yaml_comment_t{}
+ parser.comments_head++
+ }
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+ parser.token_available = false
+ parser.tokens_parsed++
+ parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+ parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+ // Erase the event object.
+ *event = yaml_event_t{}
+
+ // No events after the end of the stream or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+ return true
+ }
+
+ // Generate the next event.
+ return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+ parser.error = yaml_PARSER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = problem_mark
+ return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+ //trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+ switch parser.state {
+ case yaml_PARSE_STREAM_START_STATE:
+ return yaml_parser_parse_stream_start(parser, event)
+
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, true)
+
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return yaml_parser_parse_document_start(parser, event, false)
+
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return yaml_parser_parse_document_content(parser, event)
+
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return yaml_parser_parse_document_end(parser, event)
+
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, true, false)
+
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return yaml_parser_parse_node(parser, event, true, true)
+
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return yaml_parser_parse_node(parser, event, false, false)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_block_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+ default:
+ panic("invalid parser state")
+ }
+}
+
+// Parse the production:
+// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_STREAM_START_TOKEN {
+ return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+ }
+ parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ encoding: token.encoding,
+ }
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // Parse extra document end indicators.
+ if !implicit {
+ for token.typ == yaml_DOCUMENT_END_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+ token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+ token.typ != yaml_DOCUMENT_START_TOKEN &&
+ token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an implicit document.
+ if !yaml_parser_process_directives(parser, nil, nil) {
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+ var head_comment []byte
+ if len(parser.head_comment) > 0 {
+			// [Go] Scan the header comment backwards, and if an empty line is found, break
+			// the header so the part before the last empty line goes into the document
+			// header, while the bottom of it goes into a follow-up event.
+ for i := len(parser.head_comment) - 1; i > 0; i-- {
+ if parser.head_comment[i] == '\n' {
+ if i == len(parser.head_comment)-1 {
+ head_comment = parser.head_comment[:i]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ } else if parser.head_comment[i-1] == '\n' {
+ head_comment = parser.head_comment[:i-1]
+ parser.head_comment = parser.head_comment[i+1:]
+ break
+ }
+ }
+ }
+ }
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+
+ head_comment: head_comment,
+ }
+
+ } else if token.typ != yaml_STREAM_END_TOKEN {
+ // Parse an explicit document.
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+ start_mark := token.start_mark
+ if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+ return false
+ }
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_DOCUMENT_START_TOKEN {
+ yaml_parser_set_parser_error(parser,
+ "did not find expected <document start>", token.start_mark)
+ return false
+ }
+ parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+ parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+ end_mark := token.end_mark
+
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: false,
+ }
+ skip_token(parser)
+
+ } else {
+ // Parse the stream end.
+ parser.state = yaml_PARSE_END_STATE
+ *event = yaml_event_t{
+ typ: yaml_STREAM_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ skip_token(parser)
+ }
+
+ return true
+}
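+
+// [Go] Example (illustrative): in the stream below the first document is
+// implicit, so its DOCUMENT-START event is produced without consuming a
+// DOCUMENT-START token, while the second document is explicit and consumes
+// the '---' token:
+//
+//	a: 1
+//	---
+//	b: 2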
+
+// Parse the productions:
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+ token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+ token.typ == yaml_DOCUMENT_START_TOKEN ||
+ token.typ == yaml_DOCUMENT_END_TOKEN ||
+ token.typ == yaml_STREAM_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ return yaml_parser_process_empty_scalar(parser, event,
+ token.start_mark)
+ }
+ return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document ::= block_node DOCUMENT-END*
+// *************
+// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ implicit := true
+ if token.typ == yaml_DOCUMENT_END_TOKEN {
+ end_mark = token.end_mark
+ skip_token(parser)
+ implicit = false
+ }
+
+ parser.tag_directives = parser.tag_directives[:0]
+
+ parser.state = yaml_PARSE_DOCUMENT_START_STATE
+ *event = yaml_event_t{
+ typ: yaml_DOCUMENT_END_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ implicit: implicit,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ if len(event.head_comment) > 0 && len(event.foot_comment) == 0 {
+ event.foot_comment = event.head_comment
+ event.head_comment = nil
+ }
+ return true
+}
+
+func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) {
+ event.head_comment = parser.head_comment
+ event.line_comment = parser.line_comment
+ event.foot_comment = parser.foot_comment
+ parser.head_comment = nil
+ parser.line_comment = nil
+ parser.foot_comment = nil
+ parser.tail_comment = nil
+ parser.stem_comment = nil
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence ::=
+// ALIAS
+// *****
+// | properties (block_content | indentless_block_sequence)?
+// ********** *
+// | block_content | indentless_block_sequence
+// *
+// block_node ::= ALIAS
+// *****
+// | properties block_content?
+// ********** *
+// | block_content
+// *
+// flow_node ::= ALIAS
+// *****
+// | properties flow_content?
+// ********** *
+// | flow_content
+// *
+// properties ::= TAG ANCHOR? | ANCHOR TAG?
+// *************************
+// block_content ::= block_collection | flow_collection | SCALAR
+// ******
+// flow_content ::= flow_collection | SCALAR
+// ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+ //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_ALIAS_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ *event = yaml_event_t{
+ typ: yaml_ALIAS_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ anchor: token.value,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ start_mark := token.start_mark
+ end_mark := token.start_mark
+
+ var tag_token bool
+ var tag_handle, tag_suffix, anchor []byte
+ var tag_mark yaml_mark_t
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ } else if token.typ == yaml_TAG_TOKEN {
+ tag_token = true
+ tag_handle = token.value
+ tag_suffix = token.suffix
+ start_mark = token.start_mark
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_ANCHOR_TOKEN {
+ anchor = token.value
+ end_mark = token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+ }
+
+ var tag []byte
+ if tag_token {
+ if len(tag_handle) == 0 {
+ tag = tag_suffix
+ tag_suffix = nil
+ } else {
+ for i := range parser.tag_directives {
+ if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+ tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+ tag = append(tag, tag_suffix...)
+ break
+ }
+ }
+ if len(tag) == 0 {
+ yaml_parser_set_parser_error_context(parser,
+ "while parsing a node", start_mark,
+ "found undefined tag handle", tag_mark)
+ return false
+ }
+ }
+ }
+
+ implicit := len(tag) == 0
+ if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ return true
+ }
+ if token.typ == yaml_SCALAR_TOKEN {
+ var plain_implicit, quoted_implicit bool
+ end_mark = token.end_mark
+ if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+ plain_implicit = true
+ } else if len(tag) == 0 {
+ quoted_implicit = true
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ value: token.value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(token.style),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+ if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+ // [Go] Some of the events below can be merged as they differ only in style.
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ yaml_parser_set_event_comments(parser, event)
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+ end_mark = token.end_mark
+ parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+ }
+ if parser.stem_comment != nil {
+ event.head_comment = parser.stem_comment
+ parser.stem_comment = nil
+ }
+ return true
+ }
+ if len(anchor) > 0 || len(tag) > 0 {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ quoted_implicit: false,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+ }
+
+ context := "while parsing a flow node"
+ if block {
+ context = "while parsing a block node"
+ }
+ yaml_parser_set_parser_error_context(parser, context, start_mark,
+ "did not find expected node content", token.start_mark)
+ return false
+}
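+
+// [Go] Example (illustrative): per the properties production above, a tag
+// and an anchor may appear in either order before the node content, so both
+// of these lines describe the same scalar node:
+//
+//	&a !!str hello
+//	!!str &a hello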
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// ******************** *********** * *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ }
+ if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block collection", context_mark,
+ "did not find expected '-' indicator", token.start_mark)
+}
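+
+// [Go] Example (illustrative): in the block sequence below, the middle '-'
+// is followed by another BLOCK-ENTRY token, so the function above emits an
+// empty scalar event for it instead of parsing node content:
+//
+//	- one
+//	-
+//	- two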
+
+// Parse the productions:
+// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+ mark := token.end_mark
+ prior_head_len := len(parser.head_comment)
+ skip_token(parser)
+ yaml_parser_split_stem_comment(parser, prior_head_len)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+ token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, true, false)
+ }
+ parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
+ }
+ return true
+}
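+
+// [Go] Example (illustrative): an indentless sequence appears as the value
+// of a mapping key without extra indentation. Its entries are handled by the
+// function above, and its SEQUENCE-END event is produced as soon as a token
+// other than BLOCK-ENTRY, such as the next KEY, is seen:
+//
+//	key:
+//	- a
+//	- b
+//	other: value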
+
+// Split stem comment from head comment.
+//
+// When a sequence or map is found under a sequence entry, the former head comment
+// is assigned to the underlying sequence or map as a whole, not the individual
+// sequence or map entry as would be expected otherwise. To handle this case,
+// the previous head comment is moved aside as the stem comment.
+func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) {
+ if stem_len == 0 {
+ return
+ }
+
+ token := peek_token(parser)
+ if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN {
+ return
+ }
+
+ parser.stem_comment = parser.head_comment[:stem_len]
+ if len(parser.head_comment) == stem_len {
+ parser.head_comment = nil
+ } else {
+ // Copy suffix to prevent very strange bugs if someone ever appends
+ // further bytes to the prefix in the stem_comment slice above.
+ parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...)
+ }
+}
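+
+// [Go] Example (illustrative): in the document below the comment precedes a
+// '-' whose node is a nested block mapping, so it is moved aside as the stem
+// comment and ends up attached to the nested MAPPING-START event as its head
+// comment, rather than to the sequence entry itself:
+//
+//	list:
+//	# describes the nested mapping as a whole
+//	- nested: map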
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+// *******************
+// ((KEY block_node_or_indentless_sequence?)?
+// *** *
+// (VALUE block_node_or_indentless_sequence?)?)*
+//
+// BLOCK-END
+// *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ // [Go] A tail comment was left over from the prior mapping value processed.
+ // Emit an event for it, as it needs to be associated with that value and
+ // not with the following key.
+ if len(parser.tail_comment) > 0 {
+ *event = yaml_event_t{
+ typ: yaml_TAIL_COMMENT_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ foot_comment: parser.tail_comment,
+ }
+ parser.tail_comment = nil
+ return true
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ } else {
+ parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ } else if token.typ == yaml_BLOCK_END_TOKEN {
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+ }
+
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a block mapping", context_mark,
+ "did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping ::= BLOCK-MAPPING_START
+//
+// ((KEY block_node_or_indentless_sequence?)?
+//
+// (VALUE block_node_or_indentless_sequence?)?)*
+// ***** *
+// BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ mark := token.end_mark
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_KEY_TOKEN &&
+ token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_BLOCK_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, true, true)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+ }
+ parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
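+
+// [Go] Example (illustrative): when the ':' is not followed by node content
+// before the next key, the function above emits an empty scalar at the value
+// position, so 'a' below decodes as a null value:
+//
+//	a:
+//	b: 2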
+
+// Parse the productions:
+// flow_sequence ::= FLOW-SEQUENCE-START
+// *******************
+// (flow_sequence_entry FLOW-ENTRY)*
+// * **********
+// flow_sequence_entry?
+// *
+// FLOW-SEQUENCE-END
+// *****************
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow sequence", context_mark,
+ "did not find expected ',' or ']'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_START_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ implicit: true,
+ style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+ }
+ skip_token(parser)
+ return true
+ } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+
+ *event = yaml_event_t{
+ typ: yaml_SEQUENCE_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+
+ skip_token(parser)
+ return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ mark := token.end_mark
+ skip_token(parser)
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
+ }
+ return true
+}
+
+// Parse the productions:
+// flow_mapping ::= FLOW-MAPPING-START
+// ******************
+// (flow_mapping_entry FLOW-ENTRY)*
+// * **********
+// flow_mapping_entry?
+// ******************
+// FLOW-MAPPING-END
+// ****************
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+ if first {
+ token := peek_token(parser)
+ parser.marks = append(parser.marks, token.start_mark)
+ skip_token(parser)
+ }
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ if !first {
+ if token.typ == yaml_FLOW_ENTRY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ } else {
+ context_mark := parser.marks[len(parser.marks)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ return yaml_parser_set_parser_error_context(parser,
+ "while parsing a flow mapping", context_mark,
+ "did not find expected ',' or '}'", token.start_mark)
+ }
+ }
+
+ if token.typ == yaml_KEY_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_VALUE_TOKEN &&
+ token.typ != yaml_FLOW_ENTRY_TOKEN &&
+ token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ } else {
+ parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+
+ parser.state = parser.states[len(parser.states)-1]
+ parser.states = parser.states[:len(parser.states)-1]
+ parser.marks = parser.marks[:len(parser.marks)-1]
+ *event = yaml_event_t{
+ typ: yaml_MAPPING_END_EVENT,
+ start_mark: token.start_mark,
+ end_mark: token.end_mark,
+ }
+ yaml_parser_set_event_comments(parser, event)
+ skip_token(parser)
+ return true
+}
+
+// Parse the productions:
+// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// * ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if empty {
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+ }
+ if token.typ == yaml_VALUE_TOKEN {
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+ parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+ return yaml_parser_parse_node(parser, event, false, false)
+ }
+ }
+ parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
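+
+// [Go] Example (illustrative): in the flow mapping below, 'b' is a lone node
+// with no ':' after it, so it is routed through the EMPTY_VALUE state and the
+// function above emits an empty scalar as its value:
+//
+//	{a: 1, b, c: 3}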
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+ *event = yaml_event_t{
+ typ: yaml_SCALAR_EVENT,
+ start_mark: mark,
+ end_mark: mark,
+ value: nil, // Empty
+ implicit: true,
+ style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+ }
+ return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+ {[]byte("!"), []byte("!")},
+ {[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+ version_directive_ref **yaml_version_directive_t,
+ tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+ var version_directive *yaml_version_directive_t
+ var tag_directives []yaml_tag_directive_t
+
+ token := peek_token(parser)
+ if token == nil {
+ return false
+ }
+
+ for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+ if version_directive != nil {
+ yaml_parser_set_parser_error(parser,
+ "found duplicate %YAML directive", token.start_mark)
+ return false
+ }
+ if token.major != 1 || token.minor != 1 {
+ yaml_parser_set_parser_error(parser,
+ "found incompatible YAML document", token.start_mark)
+ return false
+ }
+ version_directive = &yaml_version_directive_t{
+ major: token.major,
+ minor: token.minor,
+ }
+ } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+ value := yaml_tag_directive_t{
+ handle: token.value,
+ prefix: token.prefix,
+ }
+ if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+ return false
+ }
+ tag_directives = append(tag_directives, value)
+ }
+
+ skip_token(parser)
+ token = peek_token(parser)
+ if token == nil {
+ return false
+ }
+ }
+
+ for i := range default_tag_directives {
+ if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+ return false
+ }
+ }
+
+ if version_directive_ref != nil {
+ *version_directive_ref = version_directive
+ }
+ if tag_directives_ref != nil {
+ *tag_directives_ref = tag_directives
+ }
+ return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+ for i := range parser.tag_directives {
+ if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+ if allow_duplicates {
+ return true
+ }
+ return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+ }
+ }
+
+ // [Go] I suspect the copy is unnecessary. This was likely done
+ // because there was no way to track ownership of the data.
+ value_copy := yaml_tag_directive_t{
+ handle: make([]byte, len(value.handle)),
+ prefix: make([]byte, len(value.prefix)),
+ }
+ copy(value_copy.handle, value.handle)
+ copy(value_copy.prefix, value.prefix)
+ parser.tag_directives = append(parser.tag_directives, value_copy)
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go
new file mode 100644
index 0000000..b7de0a8
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/readerc.go
@@ -0,0 +1,434 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+ parser.error = yaml_READER_ERROR
+ parser.problem = problem
+ parser.problem_offset = offset
+ parser.problem_value = value
+ return false
+}
+
+// Byte order marks.
+const (
+ bom_UTF8 = "\xef\xbb\xbf"
+ bom_UTF16LE = "\xff\xfe"
+ bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+ // Ensure that we had enough bytes in the raw buffer.
+ for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+ if !yaml_parser_update_raw_buffer(parser) {
+ return false
+ }
+ }
+
+ // Determine the encoding.
+ buf := parser.raw_buffer
+ pos := parser.raw_buffer_pos
+ avail := len(buf) - pos
+ if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+ parser.encoding = yaml_UTF16LE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+ parser.encoding = yaml_UTF16BE_ENCODING
+ parser.raw_buffer_pos += 2
+ parser.offset += 2
+ } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+ parser.encoding = yaml_UTF8_ENCODING
+ parser.raw_buffer_pos += 3
+ parser.offset += 3
+ } else {
+ parser.encoding = yaml_UTF8_ENCODING
+ }
+ return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+ size_read := 0
+
+ // Return if the raw buffer is full.
+ if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+ return true
+ }
+
+ // Return on EOF.
+ if parser.eof {
+ return true
+ }
+
+ // Move the remaining bytes in the raw buffer to the beginning.
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+ copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+ }
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+ parser.raw_buffer_pos = 0
+
+ // Call the read handler to fill the buffer.
+ size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+ parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+ if err == io.EOF {
+ parser.eof = true
+ } else if err != nil {
+ return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+ }
+ return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+ if parser.read_handler == nil {
+ panic("read handler must be set")
+ }
+
+ // [Go] This function was changed to guarantee the requested length size at EOF.
+ // The fact we need to do this is pretty awful, but the description above implies
+ // that to be the case, and there are tests that rely on it.
+
+ // If the EOF flag is set and the raw buffer is empty, do nothing.
+ if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+ // [Go] ACTUALLY! Read the documentation of this function above.
+ // This is just broken. To return true, we need to have the
+ // given length in the buffer. Not doing that means every single
+ // check that calls this function to make sure the buffer has a
+ // given length would either panic (in Go) or access invalid memory (in C).
+ //return true
+ }
+
+ // Return if the buffer contains enough characters.
+ if parser.unread >= length {
+ return true
+ }
+
+ // Determine the input encoding if it is not known yet.
+ if parser.encoding == yaml_ANY_ENCODING {
+ if !yaml_parser_determine_encoding(parser) {
+ return false
+ }
+ }
+
+ // Move the unread characters to the beginning of the buffer.
+ buffer_len := len(parser.buffer)
+ if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+ copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+ buffer_len -= parser.buffer_pos
+ parser.buffer_pos = 0
+ } else if parser.buffer_pos == buffer_len {
+ buffer_len = 0
+ parser.buffer_pos = 0
+ }
+
+ // Open the whole buffer for writing, and cut it before returning.
+ parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+ // Fill the buffer until it has enough characters.
+ first := true
+ for parser.unread < length {
+
+ // Fill the raw buffer if necessary.
+ if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+ if !yaml_parser_update_raw_buffer(parser) {
+ parser.buffer = parser.buffer[:buffer_len]
+ return false
+ }
+ }
+ first = false
+
+ // Decode the raw buffer.
+ inner:
+ for parser.raw_buffer_pos != len(parser.raw_buffer) {
+ var value rune
+ var width int
+
+ raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+ // Decode the next character.
+ switch parser.encoding {
+ case yaml_UTF8_ENCODING:
+ // Decode a UTF-8 character. Check RFC 3629
+ // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+ //
+ // The following table (taken from the RFC) is used for
+ // decoding.
+ //
+ // Char. number range | UTF-8 octet sequence
+ // (hexadecimal) | (binary)
+ // --------------------+------------------------------------
+ // 0000 0000-0000 007F | 0xxxxxxx
+ // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+ // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+ // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ //
+ // Additionally, the characters in the range 0xD800-0xDFFF
+ // are prohibited as they are reserved for use with UTF-16
+ // surrogate pairs.
+
+ // Determine the length of the UTF-8 sequence.
+ octet := parser.raw_buffer[parser.raw_buffer_pos]
+ switch {
+ case octet&0x80 == 0x00:
+ width = 1
+ case octet&0xE0 == 0xC0:
+ width = 2
+ case octet&0xF0 == 0xE0:
+ width = 3
+ case octet&0xF8 == 0xF0:
+ width = 4
+ default:
+ // The leading octet is invalid.
+ return yaml_parser_set_reader_error(parser,
+ "invalid leading UTF-8 octet",
+ parser.offset, int(octet))
+ }
+
+ // Check if the raw buffer contains an incomplete character.
+ if width > raw_unread {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-8 octet sequence",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Decode the leading octet.
+ switch {
+ case octet&0x80 == 0x00:
+ value = rune(octet & 0x7F)
+ case octet&0xE0 == 0xC0:
+ value = rune(octet & 0x1F)
+ case octet&0xF0 == 0xE0:
+ value = rune(octet & 0x0F)
+ case octet&0xF8 == 0xF0:
+ value = rune(octet & 0x07)
+ default:
+ value = 0
+ }
+
+ // Check and decode the trailing octets.
+ for k := 1; k < width; k++ {
+ octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+ // Check if the octet is valid.
+ if (octet & 0xC0) != 0x80 {
+ return yaml_parser_set_reader_error(parser,
+ "invalid trailing UTF-8 octet",
+ parser.offset+k, int(octet))
+ }
+
+ // Decode the octet.
+ value = (value << 6) + rune(octet&0x3F)
+ }
+
+ // Check the length of the sequence against the value.
+ switch {
+ case width == 1:
+ case width == 2 && value >= 0x80:
+ case width == 3 && value >= 0x800:
+ case width == 4 && value >= 0x10000:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "invalid length of a UTF-8 sequence",
+ parser.offset, -1)
+ }
+
+ // Check the range of the value.
+ if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+ return yaml_parser_set_reader_error(parser,
+ "invalid Unicode character",
+ parser.offset, int(value))
+ }
+
+ case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+ var low, high int
+ if parser.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ low, high = 1, 0
+ }
+
+ // The UTF-16 encoding is not as simple as one might
+ // naively think. Check RFC 2781
+ // (http://www.ietf.org/rfc/rfc2781.txt).
+ //
+ // Normally, two subsequent bytes describe a Unicode
+ // character. However a special technique (called a
+ // surrogate pair) is used for specifying character
+ // values larger than 0xFFFF.
+ //
+ // A surrogate pair consists of two pseudo-characters:
+ // high surrogate area (0xD800-0xDBFF)
+ // low surrogate area (0xDC00-0xDFFF)
+ //
+ // The following formulas are used for decoding
+ // and encoding characters using surrogate pairs:
+ //
+ // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
+ // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
+ // W1 = 110110yyyyyyyyyy
+ // W2 = 110111xxxxxxxxxx
+ //
+ // where U is the character value, W1 is the high surrogate
+ // area, W2 is the low surrogate area.
+
+ // Check for incomplete UTF-16 character.
+ if raw_unread < 2 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 character",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the character.
+ value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+ // Check for unexpected low surrogate area.
+ if value&0xFC00 == 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "unexpected low surrogate area",
+ parser.offset, int(value))
+ }
+
+ // Check for a high surrogate area.
+ if value&0xFC00 == 0xD800 {
+ width = 4
+
+ // Check for incomplete surrogate pair.
+ if raw_unread < 4 {
+ if parser.eof {
+ return yaml_parser_set_reader_error(parser,
+ "incomplete UTF-16 surrogate pair",
+ parser.offset, -1)
+ }
+ break inner
+ }
+
+ // Get the next character.
+ value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+ (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+ // Check for a low surrogate area.
+ if value2&0xFC00 != 0xDC00 {
+ return yaml_parser_set_reader_error(parser,
+ "expected low surrogate area",
+ parser.offset+2, int(value2))
+ }
+
+ // Generate the value of the surrogate pair.
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+ } else {
+ width = 2
+ }
+
+ default:
+ panic("impossible")
+ }
+
+ // Check if the character is in the allowed range:
+ // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
+ // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
+ // | [#x10000-#x10FFFF] (32 bit)
+ switch {
+ case value == 0x09:
+ case value == 0x0A:
+ case value == 0x0D:
+ case value >= 0x20 && value <= 0x7E:
+ case value == 0x85:
+ case value >= 0xA0 && value <= 0xD7FF:
+ case value >= 0xE000 && value <= 0xFFFD:
+ case value >= 0x10000 && value <= 0x10FFFF:
+ default:
+ return yaml_parser_set_reader_error(parser,
+ "control characters are not allowed",
+ parser.offset, int(value))
+ }
+
+ // Move the raw pointers.
+ parser.raw_buffer_pos += width
+ parser.offset += width
+
+ // Finally put the character into the buffer.
+ if value <= 0x7F {
+ // 0000 0000-0000 007F . 0xxxxxxx
+ parser.buffer[buffer_len+0] = byte(value)
+ buffer_len += 1
+ } else if value <= 0x7FF {
+ // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+ parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+ buffer_len += 2
+ } else if value <= 0xFFFF {
+ // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+ buffer_len += 3
+ } else {
+ // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+ parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+ parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+ parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+ parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+ buffer_len += 4
+ }
+
+ parser.unread++
+ }
+
+ // On EOF, put NUL into the buffer and return.
+ if parser.eof {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ parser.unread++
+ break
+ }
+ }
+ // [Go] Read the documentation of this function above. To return true,
+ // we need to have the given length in the buffer. Not doing that means
+ // every single check that calls this function to make sure the buffer
+ // has a given length would either panic (in Go) or access invalid memory
+ // (in C). This happens here due to the EOF above breaking early.
+ for buffer_len < length {
+ parser.buffer[buffer_len] = 0
+ buffer_len++
+ }
+ parser.buffer = parser.buffer[:buffer_len]
+ return true
+}
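+
+// [Go] Worked example (illustrative) of the UTF-8 branch above: the bytes
+// 0xC3 0xA9 have a leading octet matching 110xxxxx, so the width is 2; the
+// value is (0xC3&0x1F)<<6 + (0xA9&0x3F) = 0xE9, i.e. U+00E9 ('é'), which
+// passes both the length check (width 2, value >= 0x80) and the range check.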
diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go
new file mode 100644
index 0000000..64ae888
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/resolve.go
@@ -0,0 +1,326 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "encoding/base64"
+ "math"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type resolveMapItem struct {
+ value interface{}
+ tag string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+ t := resolveTable
+ t[int('+')] = 'S' // Sign
+ t[int('-')] = 'S'
+ for _, c := range "0123456789" {
+ t[int(c)] = 'D' // Digit
+ }
+ for _, c := range "yYnNtTfFoO~" {
+ t[int(c)] = 'M' // In map
+ }
+ t[int('.')] = '.' // Float (potentially in map)
+
+ var resolveMapList = []struct {
+ v interface{}
+ tag string
+ l []string
+ }{
+ {true, boolTag, []string{"true", "True", "TRUE"}},
+ {false, boolTag, []string{"false", "False", "FALSE"}},
+ {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}},
+ {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}},
+ {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}},
+ {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}},
+ {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}},
+ {"<<", mergeTag, []string{"<<"}},
+ }
+
+ m := resolveMap
+ for _, item := range resolveMapList {
+ for _, s := range item.l {
+ m[s] = resolveMapItem{item.v, item.tag}
+ }
+ }
+}
+
+const (
+ nullTag = "!!null"
+ boolTag = "!!bool"
+ strTag = "!!str"
+ intTag = "!!int"
+ floatTag = "!!float"
+ timestampTag = "!!timestamp"
+ seqTag = "!!seq"
+ mapTag = "!!map"
+ binaryTag = "!!binary"
+ mergeTag = "!!merge"
+)
+
+var longTags = make(map[string]string)
+var shortTags = make(map[string]string)
+
+func init() {
+ for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} {
+ ltag := longTag(stag)
+ longTags[stag] = ltag
+ shortTags[ltag] = stag
+ }
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+ if strings.HasPrefix(tag, longTagPrefix) {
+ if stag, ok := shortTags[tag]; ok {
+ return stag
+ }
+ return "!!" + tag[len(longTagPrefix):]
+ }
+ return tag
+}
+
+func longTag(tag string) string {
+ if strings.HasPrefix(tag, "!!") {
+ if ltag, ok := longTags[tag]; ok {
+ return ltag
+ }
+ return longTagPrefix + tag[2:]
+ }
+ return tag
+}
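+
+// [Go] Examples (illustrative): the two functions above are inverses for the
+// known tags, and otherwise just add or strip the standard prefix:
+//
+//	shortTag("tag:yaml.org,2002:str") // "!!str"
+//	longTag("!!str")                  // "tag:yaml.org,2002:str"
+//	shortTag("!custom")               // "!custom" (unchanged)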
+
+func resolvableTag(tag string) bool {
+ switch tag {
+ case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag:
+ return true
+ }
+ return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+ tag = shortTag(tag)
+ if !resolvableTag(tag) {
+ return tag, in
+ }
+
+ defer func() {
+ switch tag {
+ case "", rtag, strTag, binaryTag:
+ return
+ case floatTag:
+ if rtag == intTag {
+ switch v := out.(type) {
+ case int64:
+ rtag = floatTag
+ out = float64(v)
+ return
+ case int:
+ rtag = floatTag
+ out = float64(v)
+ return
+ }
+ }
+ }
+ failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+ }()
+
+ // Any data is accepted as a !!str or !!binary.
+ // Otherwise, the prefix is enough of a hint about what it might be.
+ hint := byte('N')
+ if in != "" {
+ hint = resolveTable[in[0]]
+ }
+ if hint != 0 && tag != strTag && tag != binaryTag {
+ // Handle things we can lookup in a map.
+ if item, ok := resolveMap[in]; ok {
+ return item.tag, item.value
+ }
+
+ // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+ // are purposefully unsupported here. They're still quoted on
+ // the way out for compatibility with other parsers, though.
+
+ switch hint {
+ case 'M':
+ // We've already checked the map above.
+
+ case '.':
+ // Not in the map, so maybe a normal float.
+ floatv, err := strconv.ParseFloat(in, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+
+ case 'D', 'S':
+ // Int, float, or timestamp.
+ // Only try values as a timestamp if the value is unquoted or there's an explicit
+ // !!timestamp tag.
+ if tag == "" || tag == timestampTag {
+ t, ok := parseTimestamp(in)
+ if ok {
+ return timestampTag, t
+ }
+ }
+
+ plain := strings.Replace(in, "_", "", -1)
+ intv, err := strconv.ParseInt(plain, 0, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain, 0, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ if yamlStyleFloat.MatchString(plain) {
+ floatv, err := strconv.ParseFloat(plain, 64)
+ if err == nil {
+ return floatTag, floatv
+ }
+ }
+ if strings.HasPrefix(plain, "0b") {
+ intv, err := strconv.ParseInt(plain[2:], 2, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0b") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 2, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ // Octals as introduced in version 1.2 of the spec.
+ // Octals from the 1.1 spec, spelled as 0777, are still
+ // decoded by default in v3 as well for compatibility.
+ // May be dropped in v4 depending on how usage evolves.
+ if strings.HasPrefix(plain, "0o") {
+ intv, err := strconv.ParseInt(plain[2:], 8, 64)
+ if err == nil {
+ if intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ uintv, err := strconv.ParseUint(plain[2:], 8, 64)
+ if err == nil {
+ return intTag, uintv
+ }
+ } else if strings.HasPrefix(plain, "-0o") {
+ intv, err := strconv.ParseInt("-"+plain[3:], 8, 64)
+ if err == nil {
+ if true || intv == int64(int(intv)) {
+ return intTag, int(intv)
+ } else {
+ return intTag, intv
+ }
+ }
+ }
+ default:
+ panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")")
+ }
+ }
+ return strTag, in
+}
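+
+// [Go] Worked examples (illustrative only, not upstream code) for resolve
+// above, called with an empty tag:
+//
+//	resolve("", "true")  // boolTag, true
+//	resolve("", "null")  // nullTag, nil
+//	resolve("", "1_000") // intTag, 1000 (underscores stripped first)
+//	resolve("", "0x1F")  // intTag, 31 (base inferred by strconv)
+//	resolve("", "3.14")  // floatTag, 3.14
+//	resolve("", "hello") // strTag, "hello" (no hint in resolveTable)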
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+ const lineLen = 70
+ encLen := base64.StdEncoding.EncodedLen(len(s))
+ lines := encLen/lineLen + 1
+ buf := make([]byte, encLen*2+lines)
+ in := buf[0:encLen]
+ out := buf[encLen:]
+ base64.StdEncoding.Encode(in, []byte(s))
+ k := 0
+ for i := 0; i < len(in); i += lineLen {
+ j := i + lineLen
+ if j > len(in) {
+ j = len(in)
+ }
+ k += copy(out[k:], in[i:j])
+ if lines > 1 {
+ out[k] = '\n'
+ k++
+ }
+ }
+ return string(out[:k])
+}
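+
+// [Go] Example (illustrative): short inputs fit in a single 70-character
+// line and come back without a trailing newline:
+//
+//	encodeBase64("hi") // "aGk="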
+
+// This is a subset of the formats allowed by the regular expression
+// defined at http://yaml.org/type/timestamp.html.
+var allowedTimestampFormats = []string{
+ "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
+ "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone
+ "2006-1-2", // date only
+ // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
+ // from the set of examples.
+}
+
+// parseTimestamp parses s as a timestamp string and
+// returns the timestamp and reports whether it succeeded.
+// Timestamp formats are defined at http://yaml.org/type/timestamp.html
+func parseTimestamp(s string) (time.Time, bool) {
+ // TODO write code to check all the formats supported by
+ // http://yaml.org/type/timestamp.html instead of using time.Parse.
+
+ // Quick check: all date formats start with YYYY-.
+ i := 0
+ for ; i < len(s); i++ {
+ if c := s[i]; c < '0' || c > '9' {
+ break
+ }
+ }
+ if i != 4 || i == len(s) || s[i] != '-' {
+ return time.Time{}, false
+ }
+ for _, format := range allowedTimestampFormats {
+ if t, err := time.Parse(format, s); err == nil {
+ return t, true
+ }
+ }
+ return time.Time{}, false
+}
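+
+// [Go] Examples (illustrative) for parseTimestamp above:
+//
+//	parseTimestamp("2001-12-14")           // ok: matches the date-only format
+//	parseTimestamp("2001-12-14T21:59:43Z") // ok: RFC3339Nano with short fields
+//	parseTimestamp("21:59:43")             // fails the quick YYYY- check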
diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go
new file mode 100644
index 0000000..ca00701
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/scannerc.go
@@ -0,0 +1,3038 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive-descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two aspects of Scanning that might be called "clever";
+// the rest is quite straightforward. They are "block collection start" and
+// "simple keys". Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+// STREAM-START(encoding) # The stream start.
+// STREAM-END # The stream end.
+// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+// DOCUMENT-START # '---'
+// DOCUMENT-END # '...'
+// BLOCK-SEQUENCE-START # Indentation increase denoting a block
+// BLOCK-MAPPING-START # sequence or a block mapping.
+// BLOCK-END # Indentation decrease.
+// FLOW-SEQUENCE-START # '['
+// FLOW-SEQUENCE-END # ']'
+// FLOW-MAPPING-START # '{'
+// FLOW-MAPPING-END # '}'
+// BLOCK-ENTRY # '-'
+// FLOW-ENTRY # ','
+// KEY # '?' or nothing (simple keys).
+// VALUE # ':'
+// ALIAS(anchor) # '*anchor'
+// ANCHOR(anchor) # '&anchor'
+// TAG(handle,suffix) # '!handle!suffix'
+// SCALAR(value,style) # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+// STREAM-START(encoding)
+// STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+// VERSION-DIRECTIVE(major,minor)
+// TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+// %YAML 1.1
+// %TAG ! !foo
+// %TAG !yaml! tag:yaml.org,2002:
+// ---
+//
+// The corresponding sequence of tokens:
+//
+// STREAM-START(utf-8)
+// VERSION-DIRECTIVE(1,1)
+// TAG-DIRECTIVE("!","!foo")
+// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+// DOCUMENT-START
+// STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+// DOCUMENT-START
+// DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+// 'a scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// STREAM-END
+//
+// 2. An explicit document:
+//
+// ---
+// 'a scalar'
+// ...
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-END
+// STREAM-END
+//
+// 3. Several documents in a stream:
+//
+// 'a scalar'
+// ---
+// 'another scalar'
+// ---
+// 'yet another scalar'
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// SCALAR("a scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("another scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("yet another scalar",single-quoted)
+// STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+// ALIAS(anchor)
+// ANCHOR(anchor)
+// TAG(handle,suffix)
+// SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+// &A [ *A ]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// ANCHOR("A")
+// FLOW-SEQUENCE-START
+// ALIAS("A")
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A tagged scalar:
+//
+// !!float "3.14" # A good approximation.
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// TAG("!!","float")
+// SCALAR("3.14",double-quoted)
+// STREAM-END
+//
+// 3. Various scalar styles:
+//
+// --- # Implicit empty plain scalars do not produce tokens.
+// --- a plain scalar
+// --- 'a single-quoted scalar'
+// --- "a double-quoted scalar"
+// --- |-
+// a literal scalar
+// --- >-
+// a folded
+// scalar
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// DOCUMENT-START
+// DOCUMENT-START
+// SCALAR("a plain scalar",plain)
+// DOCUMENT-START
+// SCALAR("a single-quoted scalar",single-quoted)
+// DOCUMENT-START
+// SCALAR("a double-quoted scalar",double-quoted)
+// DOCUMENT-START
+// SCALAR("a literal scalar",literal)
+// DOCUMENT-START
+// SCALAR("a folded scalar",folded)
+// STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+// FLOW-SEQUENCE-START
+// FLOW-SEQUENCE-END
+// FLOW-MAPPING-START
+// FLOW-MAPPING-END
+// FLOW-ENTRY
+// KEY
+// VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively. FLOW-ENTRY represents the ',' indicator. Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+// [item 1, item 2, item 3]
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-SEQUENCE-START
+// SCALAR("item 1",plain)
+// FLOW-ENTRY
+// SCALAR("item 2",plain)
+// FLOW-ENTRY
+// SCALAR("item 3",plain)
+// FLOW-SEQUENCE-END
+// STREAM-END
+//
+// 2. A flow mapping:
+//
+// {
+// a simple key: a value, # Note that the KEY token is produced.
+// ? a complex key: another value,
+// }
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// FLOW-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// FLOW-ENTRY
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// FLOW-ENTRY
+// FLOW-MAPPING-END
+// STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+// BLOCK-SEQUENCE-START
+// BLOCK-MAPPING-START
+// BLOCK-END
+// BLOCK-ENTRY
+// KEY
+// VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote the
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python). The token BLOCK-END denotes the indentation decrease that ends a
+// block collection (cf. the DEDENT token in Python). However, YAML has some
+// syntactic peculiarities that make the detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+// - item 1
+// - item 2
+// -
+// - item 3.1
+// - item 3.2
+// -
+// key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 3.1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 3.2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Block mappings:
+//
+// a simple key: a value # The KEY token is produced here.
+// ? a complex key
+// : another value
+// a mapping:
+// key 1: value 1
+// key 2: value 2
+// a sequence:
+// - item 1
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a simple key",plain)
+// VALUE
+// SCALAR("a value",plain)
+// KEY
+// SCALAR("a complex key",plain)
+// VALUE
+// SCALAR("another value",plain)
+// KEY
+// SCALAR("a mapping",plain)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start on the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+// - - item 1
+// - item 2
+// - key 1: value 1
+// key 2: value 2
+// - ? complex key
+// : complex value
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-ENTRY
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("complex key")
+// VALUE
+// SCALAR("complex value")
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// 2. Collections in a mapping:
+//
+// ? a sequence
+// : - item 1
+// - item 2
+// ? a mapping
+// : key 1: value 1
+// key 2: value 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("a sequence",plain)
+// VALUE
+// BLOCK-SEQUENCE-START
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// KEY
+// SCALAR("a mapping",plain)
+// VALUE
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key 1",plain)
+// VALUE
+// SCALAR("value 1",plain)
+// KEY
+// SCALAR("key 2",plain)
+// VALUE
+// SCALAR("value 2",plain)
+// BLOCK-END
+// BLOCK-END
+// STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+// key:
+// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+// - item 2
+//
+// Tokens:
+//
+// STREAM-START(utf-8)
+// BLOCK-MAPPING-START
+// KEY
+// SCALAR("key",plain)
+// VALUE
+// BLOCK-ENTRY
+// SCALAR("item 1",plain)
+// BLOCK-ENTRY
+// SCALAR("item 2",plain)
+// BLOCK-END
+// STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+ // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+ return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ parser.newlines++
+ } else if is_break(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ parser.newlines++
+ }
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ parser.newlines = 0
+ }
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
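+ // Fast path: a single-byte character that still fits within the current
+ // capacity is stored by reslicing, avoiding an append.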
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+// Copy a line break character to a string buffer and advance pointers,
+// normalizing CR, LF, CRLF, and NEL to a single '\n' while copying the
+// LS/PS sequences (U+2028/U+2029) through verbatim.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ switch {
+ case buf[pos] == '\r' && buf[pos+1] == '\n':
+ // CR LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ case buf[pos] == '\r' || buf[pos] == '\n':
+ // CR|LF . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+ // NEL . LF
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ default:
+ return s
+ }
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.newlines++
+ return s
+}
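+
+// Note the pointer accounting above: for the two-byte break "\r\n" the mark
+// index and unread count are each adjusted by two in total, once inside the
+// CRLF case and once in the shared epilogue after the switch.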
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Erase the token object.
+ *token = yaml_token_t{} // [Go] Is this necessary?
+
+ // No tokens after STREAM-END or error.
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ // Ensure that the tokens queue contains enough tokens.
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ // Fetch the next token from the queue.
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.tokens_parsed++
+ parser.token_available = false
+
+ if token.typ == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+ return true
+}
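+
+// As a minimal, illustrative sketch (not part of this file's API surface),
+// the scanner can be driven token by token roughly as follows, assuming the
+// initialization helpers defined elsewhere in this package:
+//
+//      var parser yaml_parser_t
+//      if !yaml_parser_initialize(&parser) {
+//              panic("failed to initialize parser")
+//      }
+//      yaml_parser_set_input_string(&parser, []byte("a: 1\n"))
+//      for {
+//              var token yaml_token_t
+//              if !yaml_parser_scan(&parser, &token) {
+//                      break // Scanner error; details are in parser.problem.
+//              }
+//              if token.typ == yaml_STREAM_END_TOKEN {
+//                      break
+//              }
+//      }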
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+ return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+ context := "while parsing a tag"
+ if directive {
+ context = "while parsing a %TAG directive"
+ }
+ return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+ pargs := append([]interface{}{"+++"}, args...)
+ fmt.Println(pargs...)
+ pargs = append([]interface{}{"---"}, args...)
+ return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ // While we need more tokens to fetch, do it.
+ for {
+ // [Go] The comment parsing logic requires a lookahead of two tokens
+ // so that foot comments may be parsed in time to be associated with
+ // the tokens parsed before them, and also so that line comments may
+ // be transformed into head comments in some edge cases.
+ if parser.tokens_head < len(parser.tokens)-2 {
+ // If a potential simple key is at the head position, we need to fetch
+ // the next token to disambiguate it.
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed]
+ if !ok {
+ break
+ } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok {
+ return false
+ } else if !valid {
+ break
+ }
+ }
+ // Fetch the next token.
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+ }
+
+ parser.token_available = true
+ return true
+}
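+
+// For example, while scanning "a: 1" the scanner cannot hand the token for
+// "a" to the Parser until it has seen the ':' proving that "a" was a simple
+// key, so the head of the queue is held back while a potential simple key at
+// the head position remains unresolved.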
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) {
+ // Ensure that the buffer is initialized.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we just started scanning. Fetch STREAM-START then.
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ scan_mark := parser.mark
+
+ // Eat whitespaces and comments until we reach the next token.
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ // [Go] While unrolling indents, transform the head comments of prior
+ // indentation levels observed after scan_start into foot comments at
+ // the respective indexes.
+
+ // Check the indentation level against the current column.
+ if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) {
+ return false
+ }
+
+ // Ensure that the buffer contains at least 4 characters. 4 is the length
+ // of the longest indicators ('--- ' and '... ').
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ // Is it the end of the stream?
+ if is_z(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ // Is it a directive?
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ // Is it the document start indicator?
+ if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+ }
+
+ // Is it the document end indicator?
+ if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+ }
+
+ comment_mark := parser.mark
+ if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') {
+ // Associate any following comments with the prior token.
+ comment_mark = parser.tokens[len(parser.tokens)-1].start_mark
+ }
+ defer func() {
+ if !ok {
+ return
+ }
+ if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN {
+ // A sequence indicator alone takes no line comment; the comment
+ // instead becomes a head comment for whatever follows.
+ return
+ }
+ if !yaml_parser_scan_line_comment(parser, comment_mark) {
+ ok = false
+ return
+ }
+ }()
+
+ // Is it the flow sequence start indicator?
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ // Is it the flow mapping start indicator?
+ if parser.buffer[parser.buffer_pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ // Is it the flow sequence end indicator?
+ if parser.buffer[parser.buffer_pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ // Is it the flow mapping end indicator?
+ if parser.buffer[parser.buffer_pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ // Is it the flow entry indicator?
+ if parser.buffer[parser.buffer_pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ // Is it the block entry indicator?
+ if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ // Is it the key indicator?
+ if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ // Is it the value indicator?
+ if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ // Is it an alias?
+ if parser.buffer[parser.buffer_pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ // Is it an anchor?
+ if parser.buffer[parser.buffer_pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ // Is it a tag?
+ if parser.buffer[parser.buffer_pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ // Is it a literal scalar?
+ if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ // Is it a folded scalar?
+ if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ // Is it a single-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ // Is it a double-quoted scalar?
+ if parser.buffer[parser.buffer_pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ // Is it a plain scalar?
+ //
+ // A plain scalar may start with any non-blank characters except
+ //
+ // '-', '?', ':', ',', '[', ']', '{', '}',
+ // '#', '&', '*', '!', '|', '>', '\'', '\"',
+ // '%', '@', '`'.
+ //
+ // In the block context (and, for the '-' indicator, in the flow context
+ // too), it may also start with the characters
+ //
+ // '-', '?', ':'
+ //
+ // if it is followed by a non-space character.
+ //
+ // The last rule is more restrictive than the specification requires.
+ // [Go] TODO Make this logic more reasonable.
+ //switch parser.buffer[parser.buffer_pos] {
+ //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+ //}
+ if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+ parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+ parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+ (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level == 0 &&
+ (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+ !is_blankz(parser.buffer, parser.buffer_pos+1)) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ // If we could not determine the token type so far, it is an error.
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) {
+ if !simple_key.possible {
+ return false, true
+ }
+
+ // The 1.2 specification says:
+ //
+ // "If the ? indicator is omitted, parsing needs to see past the
+ // implicit key to recognize it as such. To limit the amount of
+ // lookahead required, the “:” indicator must appear at most 1024
+ // Unicode characters beyond the start of the key. In addition, the key
+ // is restricted to a single line."
+ //
+ if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index {
+ // Check if the potential simple key to be removed is required.
+ if simple_key.required {
+ return false, yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ simple_key.possible = false
+ return false, true
+ }
+ return true, true
+}
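+
+// As a concrete illustration of the rule above: in "a: 1" the ':' appears on
+// the same line as, and well within 1024 characters of, the start of "a", so
+// the simple key is valid. A ':' that only shows up on a later line can no
+// longer validate the key; if the key was required, that is a scanner error.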
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ // A simple key is required at the current position if the scanner is in
+ // the block context and the current column coincides with the indentation
+ // level.
+
+ required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+ //
+ // If the current position may start a simple key, save it.
+ //
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ }
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+ }
+ return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ i := len(parser.simple_keys) - 1
+ if parser.simple_keys[i].possible {
+ // If the key is required, it is an error.
+ if parser.simple_keys[i].required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", parser.simple_keys[i].mark,
+ "could not find expected ':'")
+ }
+ // Remove the key from the stack.
+ parser.simple_keys[i].possible = false
+ delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+ }
+ return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ // Reset the simple key on the next level.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+ possible: false,
+ required: false,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ mark: parser.mark,
+ })
+
+ // Increase the flow level.
+ parser.flow_level++
+ if parser.flow_level > max_flow_level {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+ }
+ return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ last := len(parser.simple_keys) - 1
+ delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+ parser.simple_keys = parser.simple_keys[:last]
+ }
+ return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent < column {
+ // Push the current indentation level to the stack and set the new
+ // indentation level.
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+ if len(parser.indents) > max_indents {
+ return yaml_parser_set_scanner_error(parser,
+ "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+ fmt.Sprintf("exceeded max depth of %d", max_indents))
+ }
+
+ // Create a token and insert it into the queue.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: mark,
+ end_mark: mark,
+ }
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ yaml_insert_token(parser, number, &token)
+ }
+ return true
+}
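+
+// A rough sketch of how the indent stack evolves for a nested block mapping
+// (columns are 0-based):
+//
+//      key:          # indent -1 -> 0: BLOCK-MAPPING-START inserted
+//        nested: 1   # indent  0 -> 2: BLOCK-MAPPING-START inserted
+//
+// Each level pushed here is later popped by yaml_parser_unroll_indent, which
+// appends a matching BLOCK-END token.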
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column. For each indentation level,
+// append the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool {
+ // In the flow context, do nothing.
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ block_mark := scan_mark
+ block_mark.index--
+
+ // Loop through the indentation levels in the stack.
+ for parser.indent > column {
+
+ // [Go] Reposition the end token before potential following
+ // foot comments of parent blocks. For that, search
+ // backwards for recent comments that were at the same
+ // indent as the block that is ending now.
+ stop_index := block_mark.index
+ for i := len(parser.comments) - 1; i >= 0; i-- {
+ comment := &parser.comments[i]
+
+ if comment.end_mark.index < stop_index {
+ // Don't go back beyond the start of the comment/whitespace scan, unless column < 0.
+ // If requested indent column is < 0, then the document is over and everything else
+ // is a foot anyway.
+ break
+ }
+ if comment.start_mark.column == parser.indent+1 {
+ // This is a good match. But maybe there's a former comment
+ // at that same indent level, so keep searching.
+ block_mark = comment.start_mark
+ }
+
+ // While the end of the former comment matches with
+ // the start of the following one, we know there's
+ // nothing in between and scanning is still safe.
+ stop_index = comment.scan_mark.index
+ }
+
+ // Create a token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_END_TOKEN,
+ start_mark: block_mark,
+ end_mark: block_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+
+ // Pop the indentation level.
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+ return true
+}
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+ // Set the initial indentation.
+ parser.indent = -1
+
+ // Initialize the simple key stack.
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ parser.simple_keys_by_tok = make(map[int]int)
+
+ // A simple key is allowed at the beginning of the stream.
+ parser.simple_key_allowed = true
+
+ // We have started.
+ parser.stream_start_produced = true
+
+ // Create the STREAM-START token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+ // Force new line.
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the STREAM-END token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ token := yaml_token_t{}
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset the indentation level.
+ if !yaml_parser_unroll_indent(parser, -1, parser.mark) {
+ return false
+ }
+
+ // Reset simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ // Create the DOCUMENT-START or DOCUMENT-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+
+ // The indicators '[' and '{' may start a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // Increase the flow level.
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ // A simple key may follow the indicators '[' and '{'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // Reset any potential simple key on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Decrease the flow level.
+ if !yaml_parser_decrease_flow_level(parser) {
+ return false
+ }
+
+ // No simple keys after the indicators ']' and '}'.
+ parser.simple_key_allowed = false
+
+ // Consume the token.
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ token := yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ // Append the token to the queue.
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after ','.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the FLOW-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_FLOW_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+ // Check if the scanner is in the block context.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new entry.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "block sequence entries are not allowed in this context")
+ }
+ // Add the BLOCK-SEQUENCE-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+ return false
+ }
+ } else {
+ // It is an error for the '-' indicator to occur in the flow context,
+ // but we let the Parser detect and report it, since the Parser is
+ // able to point to the context.
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '-'.
+ parser.simple_key_allowed = true
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the BLOCK-ENTRY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_BLOCK_ENTRY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+ // In the block context, additional checks are required.
+ if parser.flow_level == 0 {
+ // Check if we are allowed to start a new key (not necessarily simple).
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping keys are not allowed in this context")
+ }
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Reset any potential simple keys on the current flow level.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // Simple keys are allowed after '?' in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the KEY token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ // Have we found a simple key?
+ if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+ return false
+
+ } else if valid {
+
+ // Create the KEY token and insert it into the queue.
+ token := yaml_token_t{
+ typ: yaml_KEY_TOKEN,
+ start_mark: simple_key.mark,
+ end_mark: simple_key.mark,
+ }
+ yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+ // In the block context, we may need to add the BLOCK-MAPPING-START token.
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+ simple_key.token_number,
+ yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+ return false
+ }
+
+ // Remove the simple key.
+ simple_key.possible = false
+ delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+ // A simple key cannot follow another simple key.
+ parser.simple_key_allowed = false
+
+ } else {
+ // The ':' indicator follows a complex key.
+
+ // In the block context, extra checks are required.
+ if parser.flow_level == 0 {
+
+ // Check if we are allowed to start a complex value.
+ if !parser.simple_key_allowed {
+ return yaml_parser_set_scanner_error(parser, "", parser.mark,
+ "mapping values are not allowed in this context")
+ }
+
+ // Add the BLOCK-MAPPING-START token if needed.
+ if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+ return false
+ }
+ }
+
+ // Simple keys after ':' are allowed in the block context.
+ parser.simple_key_allowed = parser.flow_level == 0
+ }
+
+ // Consume the token.
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create the VALUE token and append it to the queue.
+ token := yaml_token_t{
+ typ: yaml_VALUE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
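+
+// Note how the simple-key machinery plays out here: in "a: 1" the scanner
+// only learns that "a" was a key upon reaching ':', at which point the KEY
+// token is inserted retroactively at the queue position recorded in
+// simple_key.token_number, ahead of the already-queued SCALAR("a",plain).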
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+ // An anchor or an alias could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow an anchor or an alias.
+ parser.simple_key_allowed = false
+
+ // Create the ALIAS or ANCHOR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_anchor(parser, &token, typ) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+ // A tag could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a tag.
+ parser.simple_key_allowed = false
+
+ // Create the TAG token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_tag(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+ // Remove any potential simple keys.
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ // A simple key may follow a block scalar.
+ parser.simple_key_allowed = true
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+ // A flow scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a flow scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+ // A plain scalar could be a simple key.
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ // A simple key cannot follow a plain scalar.
+ parser.simple_key_allowed = false
+
+ // Create the SCALAR token and append it to the queue.
+ var token yaml_token_t
+ if !yaml_parser_scan_plain_scalar(parser, &token) {
+ return false
+ }
+ yaml_insert_token(parser, -1, &token)
+ return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+ scan_mark := parser.mark
+
+ // Loop until the next token is found.
+ for {
+ // Allow the BOM mark to start a line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ }
+
+ // Eat whitespaces.
+ // Tabs are allowed:
+ // - in the flow context
+ // - in the block context, but not at the beginning of the line or
+ // after '-', '?', or ':' (complex value).
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if we just had a line comment under a sequence entry that
+ // looks more like a header to the following content. Similar to this:
+ //
+ // - # The comment
+ // - Some data
+ //
+ // If so, transform the line comment to a head comment and reposition.
+ if len(parser.comments) > 0 && len(parser.tokens) > 1 {
+ tokenA := parser.tokens[len(parser.tokens)-2]
+ tokenB := parser.tokens[len(parser.tokens)-1]
+ comment := &parser.comments[len(parser.comments)-1]
+ if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) {
+ // If it was in the prior line, reposition so it becomes a
+ // header of the follow-up token. Otherwise, keep it in place
+ // so it becomes a header of the former.
+ comment.head = comment.line
+ comment.line = nil
+ if comment.start_mark.line == parser.mark.line-1 {
+ comment.token_mark = parser.mark
+ }
+ }
+ }
+
+ // Eat a comment until a line break.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_comments(parser, scan_mark) {
+ return false
+ }
+ }
+
+ // If it is a line break, eat it.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+
+ // In the block context, a new line may start a simple key.
+ if parser.flow_level == 0 {
+ parser.simple_key_allowed = true
+ }
+ } else {
+ break // We have found a token.
+ }
+ }
+
+ return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+ // Eat '%'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the directive name.
+ var name []byte
+ if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+ return false
+ }
+
+ // Is it a YAML directive?
+ if bytes.Equal(name, []byte("YAML")) {
+ // Scan the VERSION directive value.
+ var major, minor int8
+ if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a VERSION-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_VERSION_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ major: major,
+ minor: minor,
+ }
+
+ // Is it a TAG directive?
+ } else if bytes.Equal(name, []byte("TAG")) {
+ // Scan the TAG directive value.
+ var handle, prefix []byte
+ if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+ return false
+ }
+ end_mark := parser.mark
+
+ // Create a TAG-DIRECTIVE token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_DIRECTIVE_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ prefix: prefix,
+ }
+
+ // Unknown directive.
+ } else {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unknown directive name")
+ return false
+ }
+
+ // Eat the rest of the line including any comments.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ if parser.buffer[parser.buffer_pos] == '#' {
+ // [Go] Discard this inline comment for the time being.
+ //if !yaml_parser_scan_line_comment(parser, start_mark) {
+ // return false
+ //}
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+ // Consume the directive name.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ var s []byte
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the name is empty.
+ if len(s) == 0 {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "could not find expected directive name")
+ return false
+ }
+
+ // Check for a blank character after the name.
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a directive",
+ start_mark, "found unexpected non-alphabetical character")
+ return false
+ }
+ *name = s
+ return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the major version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+ return false
+ }
+
+ // Eat '.'.
+ if parser.buffer[parser.buffer_pos] != '.' {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected digit or '.' character")
+ }
+
+ skip(parser)
+
+ // Consume the minor version number.
+ if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+ return false
+ }
+ return true
+}
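+
+// For example, scanning the value of "%YAML 1.2" yields major == 1 and
+// minor == 2, each consumed by yaml_parser_scan_version_directive_number
+// below.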
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+// %YAML 1.1 # a comment \n
+// ^
+// %YAML 1.1 # a comment \n
+// ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+ // Repeat while the next character is a digit.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var value, length int8
+ for is_digit(parser.buffer, parser.buffer_pos) {
+ // Check if the number is too long.
+ length++
+ if length > max_number_length {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "found extremely long version number")
+ }
+ value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the number was present.
+ if length == 0 {
+ return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+ start_mark, "did not find expected version number")
+ }
+ *number = value
+ return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+// %TAG !yaml! tag:yaml.org,2002: \n
+// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+ var handle_value, prefix_value []byte
+
+ // Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+ return false
+ }
+
+ // Expect a whitespace.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blank(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace")
+ return false
+ }
+
+ // Eat whitespaces.
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Scan a prefix.
+ if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+ return false
+ }
+
+ // Expect a whitespace or line break.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ *handle = handle_value
+ *prefix = prefix_value
+ return true
+}
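+
+// For example, scanning the value of "%TAG !yaml! tag:yaml.org,2002:" yields
+// the handle "!yaml!" (including both '!' marks, as scanned by
+// yaml_parser_scan_tag_handle) and the prefix "tag:yaml.org,2002:".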
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+ var s []byte
+
+ // Eat the indicator character.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the value.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ end_mark := parser.mark
+
+ // Check if the length of the anchor is greater than 0 and it is followed
+ // by a whitespace character or one of the indicators:
+ //
+ //      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+ if len(s) == 0 ||
+ !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+ parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '`') {
+ context := "while scanning an alias"
+ if typ == yaml_ANCHOR_TOKEN {
+ context = "while scanning an anchor"
+ }
+ yaml_parser_set_scanner_error(parser, context, start_mark,
+ "did not find expected alphabetic or numeric character")
+ return false
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: typ,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ }
+
+ return true
+}
+
+// Scan a TAG token.
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+ var handle, suffix []byte
+
+ start_mark := parser.mark
+
+ // Check if the tag is in the canonical form.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ if parser.buffer[parser.buffer_pos+1] == '<' {
+ // Keep the handle as ''
+
+ // Eat '!<'
+ skip(parser)
+ skip(parser)
+
+ // Consume the tag value.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+
+ // Check for '>' and eat it.
+ if parser.buffer[parser.buffer_pos] != '>' {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find the expected '>'")
+ return false
+ }
+
+ skip(parser)
+ } else {
+ // The tag has either the '!suffix' or the '!handle!suffix' form.
+
+ // First, try to scan a handle.
+ if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+ return false
+ }
+
+ // Check if it is, indeed, a handle.
+ if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+ // Scan the suffix now.
+ if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+ return false
+ }
+ } else {
+ // It wasn't a handle after all. Scan the rest of the tag.
+ if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+ return false
+ }
+
+ // Set the handle to '!'.
+ handle = []byte{'!'}
+
+ // A special case: the '!' tag. Set the handle to '' and the
+ // suffix to '!'.
+ if len(suffix) == 0 {
+ handle, suffix = suffix, handle
+ }
+ }
+ }
+
+ // Check the character which ends the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if !is_blankz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a tag",
+ start_mark, "did not find expected whitespace or line break")
+ return false
+ }
+
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_TAG_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: handle,
+ suffix: suffix,
+ }
+ return true
+}
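+
+// For example, "!!str" is scanned as the handle "!!" with the suffix "str",
+// while the verbatim form "!<tag:yaml.org,2002:str>" keeps an empty handle
+// and scans the whole URI as the suffix.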
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+ // Check the initial '!' character.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] != '!' {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+
+ var s []byte
+
+ // Copy the '!' character.
+ s = read(parser, s)
+
+ // Copy all subsequent alphabetical and numerical characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_alpha(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check if the trailing character is '!' and copy it.
+ if parser.buffer[parser.buffer_pos] == '!' {
+ s = read(parser, s)
+ } else {
+ // It's either the '!' tag or not really a tag handle. If it's a %TAG
+ // directive, it's an error. If it's a tag token, it must be part of a URI.
+ if directive && string(s) != "!" {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected '!'")
+ return false
+ }
+ }
+
+ *handle = s
+ return true
+}
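+
+// Examples of handles accepted above: "!" (the primary handle), "!!" (the
+// secondary handle), and named handles such as "!foo!".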
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+ //size_t length = head ? strlen((char *)head) : 0
+ var s []byte
+ hasTag := len(head) > 0
+
+ // Copy the head if needed.
+ //
+ // Note that we don't copy the leading '!' character.
+ if len(head) > 1 {
+ s = append(s, head[1:]...)
+ }
+
+ // Scan the tag.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // The set of characters that may appear in a URI is as follows:
+ //
+ // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+ // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+ // '%'.
+ // [Go] TODO Convert this into more reasonable logic.
+ for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+ parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+ parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+ parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+ parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+ parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+ parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+ parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+ parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+ parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+ parser.buffer[parser.buffer_pos] == '%' {
+ // Check if it is a URI-escape sequence.
+ if parser.buffer[parser.buffer_pos] == '%' {
+ if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+ return false
+ }
+ } else {
+ s = read(parser, s)
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ hasTag = true
+ }
+
+ if !hasTag {
+ yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find expected tag URI")
+ return false
+ }
+ *uri = s
+ return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+ // Decode the required number of characters. The value 1024 acts as a
+ // sentinel meaning the leading octet has not been decoded yet.
+ w := 1024
+ for w > 0 {
+ // Check for a URI-escaped octet.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+
+ if !(parser.buffer[parser.buffer_pos] == '%' &&
+ is_hex(parser.buffer, parser.buffer_pos+1) &&
+ is_hex(parser.buffer, parser.buffer_pos+2)) {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "did not find URI escaped octet")
+ }
+
+ // Get the octet.
+ octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+ // If it is the leading octet, determine the length of the UTF-8 sequence.
+ if w == 1024 {
+ w = width(octet)
+ if w == 0 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect leading UTF-8 octet")
+ }
+ } else {
+ // Check if the trailing octet is correct.
+ if octet&0xC0 != 0x80 {
+ return yaml_parser_set_scanner_tag_error(parser, directive,
+ start_mark, "found an incorrect trailing UTF-8 octet")
+ }
+ }
+
+ // Copy the octet and move the pointers.
+ *s = append(*s, octet)
+ skip(parser)
+ skip(parser)
+ skip(parser)
+ w--
+ }
+ return true
+}
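+
+// For instance, the escaped input "%C3%A9" decodes to the octets 0xC3 0xA9,
+// a single two-byte UTF-8 character ('é'): width(0xC3) == 2, so exactly one
+// trailing octet matching the 10xxxxxx pattern is expected and verified.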
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+ // Eat the indicator '|' or '>'.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Scan the additional block scalar indicators.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check for a chomping indicator.
+ var chomping, increment int
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ // Set the chomping method and eat the indicator.
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+
+ // Check for an indentation indicator.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_digit(parser.buffer, parser.buffer_pos) {
+ // Check that the indentation is greater than 0.
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+
+ // Get the indentation level and eat the indicator.
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+ }
+
+ } else if is_digit(parser.buffer, parser.buffer_pos) {
+ // Do the same as above, but in the opposite order.
+
+ if parser.buffer[parser.buffer_pos] == '0' {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found an indentation indicator equal to 0")
+ return false
+ }
+ increment = as_digit(parser.buffer, parser.buffer_pos)
+ skip(parser)
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+ if parser.buffer[parser.buffer_pos] == '+' {
+ chomping = +1
+ } else {
+ chomping = -1
+ }
+ skip(parser)
+ }
+ }
+
+ // Eat whitespaces and comments to the end of the line.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for is_blank(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.buffer[parser.buffer_pos] == '#' {
+ if !yaml_parser_scan_line_comment(parser, start_mark) {
+ return false
+ }
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ }
+
+ // Check if we are at the end of the line.
+ if !is_breakz(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "did not find expected comment or line break")
+ return false
+ }
+
+ // Eat a line break.
+ if is_break(parser.buffer, parser.buffer_pos) {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ }
+
+ end_mark := parser.mark
+
+ // Set the indentation level if it was specified.
+ var indent int
+ if increment > 0 {
+ if parser.indent >= 0 {
+ indent = parser.indent + increment
+ } else {
+ indent = increment
+ }
+ }
+
+ // Scan the leading line breaks and determine the indentation level if needed.
+ var s, leading_break, trailing_breaks []byte
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+
+ // Scan the block scalar content.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ var leading_blank, trailing_blank bool
+ for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+ // We are at the beginning of a non-empty line.
+
+ // Is it a trailing whitespace?
+ trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Check if we need to fold the leading line break.
+ if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+ // Do we need to join the lines by space?
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ }
+ } else {
+ s = append(s, leading_break...)
+ }
+ leading_break = leading_break[:0]
+
+ // Append the remaining line breaks.
+ s = append(s, trailing_breaks...)
+ trailing_breaks = trailing_breaks[:0]
+
+ // Is it a leading whitespace?
+ leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+ // Consume the current line.
+ for !is_breakz(parser.buffer, parser.buffer_pos) {
+ s = read(parser, s)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ leading_break = read_line(parser, leading_break)
+
+ // Eat the following indentation spaces and line breaks.
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+ return false
+ }
+ }
+
+ // Chomp the tail.
+ if chomping != -1 {
+ s = append(s, leading_break...)
+ }
+ if chomping == 1 {
+ s = append(s, trailing_breaks...)
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_LITERAL_SCALAR_STYLE,
+ }
+ if !literal {
+ token.style = yaml_FOLDED_SCALAR_STYLE
+ }
+ return true
+}
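+
+// Illustrative note (not in the original source): the header scanned above
+// combines the style indicator with optional chomping and indentation
+// indicators, accepted in either order. For example:
+//
+//     key: |+2
+//       literal text
+//
+// scans as literal=true, chomping=+1 (keep trailing line breaks) and
+// increment=2 (content starts two columns past the parent indentation),
+// while ">-" would fold inner line breaks and strip trailing ones.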
+
+// Scan indentation spaces and line breaks for a block scalar. Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+ *end_mark = parser.mark
+
+ // Eat the indentation spaces and line breaks.
+ max_indent := 0
+ for {
+ // Eat the indentation spaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+ skip(parser)
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+ if parser.mark.column > max_indent {
+ max_indent = parser.mark.column
+ }
+
+ // Check for a tab character messing the indentation.
+ if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+ return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+ start_mark, "found a tab character where an indentation space is expected")
+ }
+
+ // Have we found a non-empty line?
+ if !is_break(parser.buffer, parser.buffer_pos) {
+ break
+ }
+
+ // Consume the line break.
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ // [Go] Should really be returning breaks instead.
+ *breaks = read_line(parser, *breaks)
+ *end_mark = parser.mark
+ }
+
+ // Determine the indentation level if needed.
+ if *indent == 0 {
+ *indent = max_indent
+ if *indent < parser.indent+1 {
+ *indent = parser.indent + 1
+ }
+ if *indent < 1 {
+ *indent = 1
+ }
+ }
+ return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+ // Eat the left quote.
+ start_mark := parser.mark
+ skip(parser)
+
+ // Consume the content of the quoted scalar.
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ for {
+ // Check that there are no document indicators at the beginning of the line.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected document indicator")
+ return false
+ }
+
+ // Check for EOF.
+ if is_z(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+ start_mark, "found unexpected end of stream")
+ return false
+ }
+
+ // Consume non-blank characters.
+ leading_blanks := false
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+ if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+ // It is an escaped single quote.
+ s = append(s, '\'')
+ skip(parser)
+ skip(parser)
+
+ } else if single && parser.buffer[parser.buffer_pos] == '\'' {
+ // It is a right single quote.
+ break
+ } else if !single && parser.buffer[parser.buffer_pos] == '"' {
+ // It is a right double quote.
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+ // It is an escaped line break.
+ if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+ return false
+ }
+ skip(parser)
+ skip_line(parser)
+ leading_blanks = true
+ break
+
+ } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+ // It is an escape sequence.
+ code_length := 0
+
+ // Check the escape character.
+ switch parser.buffer[parser.buffer_pos+1] {
+ case '0':
+ s = append(s, 0)
+ case 'a':
+ s = append(s, '\x07')
+ case 'b':
+ s = append(s, '\x08')
+ case 't', '\t':
+ s = append(s, '\x09')
+ case 'n':
+ s = append(s, '\x0A')
+ case 'v':
+ s = append(s, '\x0B')
+ case 'f':
+ s = append(s, '\x0C')
+ case 'r':
+ s = append(s, '\x0D')
+ case 'e':
+ s = append(s, '\x1B')
+ case ' ':
+ s = append(s, '\x20')
+ case '"':
+ s = append(s, '"')
+ case '\'':
+ s = append(s, '\'')
+ case '\\':
+ s = append(s, '\\')
+ case 'N': // NEL (#x85)
+ s = append(s, '\xC2')
+ s = append(s, '\x85')
+ case '_': // #xA0
+ s = append(s, '\xC2')
+ s = append(s, '\xA0')
+ case 'L': // LS (#x2028)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA8')
+ case 'P': // PS (#x2029)
+ s = append(s, '\xE2')
+ s = append(s, '\x80')
+ s = append(s, '\xA9')
+ case 'x':
+ code_length = 2
+ case 'u':
+ code_length = 4
+ case 'U':
+ code_length = 8
+ default:
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found unknown escape character")
+ return false
+ }
+
+ skip(parser)
+ skip(parser)
+
+ // Consume an arbitrary escape code.
+ if code_length > 0 {
+ var value int
+
+ // Scan the character value.
+ if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+ return false
+ }
+ for k := 0; k < code_length; k++ {
+ if !is_hex(parser.buffer, parser.buffer_pos+k) {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "did not find expected hexdecimal number")
+ return false
+ }
+ value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+ }
+
+ // Check the value and write the character.
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+ yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+ start_mark, "found invalid Unicode character escape code")
+ return false
+ }
+ if value <= 0x7F {
+ s = append(s, byte(value))
+ } else if value <= 0x7FF {
+ s = append(s, byte(0xC0+(value>>6)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else if value <= 0xFFFF {
+ s = append(s, byte(0xE0+(value>>12)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ } else {
+ s = append(s, byte(0xF0+(value>>18)))
+ s = append(s, byte(0x80+((value>>12)&0x3F)))
+ s = append(s, byte(0x80+((value>>6)&0x3F)))
+ s = append(s, byte(0x80+(value&0x3F)))
+ }
+
+ // Advance the pointer.
+ for k := 0; k < code_length; k++ {
+ skip(parser)
+ }
+ }
+ } else {
+ // It is a non-escaped non-blank character.
+ s = read(parser, s)
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ // Check if we are at the end of the scalar.
+ if single {
+ if parser.buffer[parser.buffer_pos] == '\'' {
+ break
+ }
+ } else {
+ if parser.buffer[parser.buffer_pos] == '"' {
+ break
+ }
+ }
+
+ // Consume blank characters.
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Join the whitespaces or fold line breaks.
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if len(leading_break) > 0 && leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Eat the right quote.
+ skip(parser)
+ end_mark := parser.mark
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
+ }
+ if !single {
+ token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+ }
+ return true
+}
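+
+// Illustrative sketch (not part of the original source): the escape handling
+// above turns "\u263A" inside a double-quoted scalar into code point U+263A
+// and re-encodes it as UTF-8, equivalent to the manual encoding:
+//
+//     value := 0x263A // from the four hex digits
+//     s := []byte{
+//         byte(0xE0 + (value >> 12)),         // 0xE2
+//         byte(0x80 + ((value >> 6) & 0x3F)), // 0x98
+//         byte(0x80 + (value & 0x3F)),        // 0xBA
+//     }
+//     // s is the UTF-8 encoding of '☺'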
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+ var s, leading_break, trailing_breaks, whitespaces []byte
+ var leading_blanks bool
+ var indent = parser.indent + 1
+
+ start_mark := parser.mark
+ end_mark := parser.mark
+
+ // Consume the content of the plain scalar.
+ for {
+ // Check for a document indicator.
+ if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+ return false
+ }
+ if parser.mark.column == 0 &&
+ ((parser.buffer[parser.buffer_pos+0] == '-' &&
+ parser.buffer[parser.buffer_pos+1] == '-' &&
+ parser.buffer[parser.buffer_pos+2] == '-') ||
+ (parser.buffer[parser.buffer_pos+0] == '.' &&
+ parser.buffer[parser.buffer_pos+1] == '.' &&
+ parser.buffer[parser.buffer_pos+2] == '.')) &&
+ is_blankz(parser.buffer, parser.buffer_pos+3) {
+ break
+ }
+
+ // Check for a comment.
+ if parser.buffer[parser.buffer_pos] == '#' {
+ break
+ }
+
+ // Consume non-blank characters.
+ for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+ // Check for indicators that may end a plain scalar.
+ if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+ (parser.flow_level > 0 &&
+ (parser.buffer[parser.buffer_pos] == ',' ||
+ parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+ parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+ parser.buffer[parser.buffer_pos] == '}')) {
+ break
+ }
+
+ // Check if we need to join whitespaces and breaks.
+ if leading_blanks || len(whitespaces) > 0 {
+ if leading_blanks {
+ // Do we need to fold line breaks?
+ if leading_break[0] == '\n' {
+ if len(trailing_breaks) == 0 {
+ s = append(s, ' ')
+ } else {
+ s = append(s, trailing_breaks...)
+ }
+ } else {
+ s = append(s, leading_break...)
+ s = append(s, trailing_breaks...)
+ }
+ trailing_breaks = trailing_breaks[:0]
+ leading_break = leading_break[:0]
+ leading_blanks = false
+ } else {
+ s = append(s, whitespaces...)
+ whitespaces = whitespaces[:0]
+ }
+ }
+
+ // Copy the character.
+ s = read(parser, s)
+
+ end_mark = parser.mark
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ }
+
+ // Is it the end?
+ if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+ break
+ }
+
+ // Consume blank characters.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+
+ for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+ if is_blank(parser.buffer, parser.buffer_pos) {
+
+ // Check for tab characters that abuse indentation.
+ if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+ yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+ start_mark, "found a tab character that violates indentation")
+ return false
+ }
+
+ // Consume a space or a tab character.
+ if !leading_blanks {
+ whitespaces = read(parser, whitespaces)
+ } else {
+ skip(parser)
+ }
+ } else {
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+
+ // Check if it is a first line break.
+ if !leading_blanks {
+ whitespaces = whitespaces[:0]
+ leading_break = read_line(parser, leading_break)
+ leading_blanks = true
+ } else {
+ trailing_breaks = read_line(parser, trailing_breaks)
+ }
+ }
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ }
+
+ // Check indentation level.
+ if parser.flow_level == 0 && parser.mark.column < indent {
+ break
+ }
+ }
+
+ // Create a token.
+ *token = yaml_token_t{
+ typ: yaml_SCALAR_TOKEN,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ value: s,
+ style: yaml_PLAIN_SCALAR_STYLE,
+ }
+
+ // Note that we change the 'simple_key_allowed' flag.
+ if leading_blanks {
+ parser.simple_key_allowed = true
+ }
+ return true
+}
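+
+// Illustrative note (not in the original source): a plain scalar ends at a
+// ':' followed by a blank, at a comment, or, in flow context, at one of the
+// ",?[]{}" indicators checked above. For example:
+//
+//     key: plain value # comment
+//     list: [a, b]
+//
+// scans "plain value" (the comment terminates it) and, inside the flow
+// sequence, "a" and "b" (the ',' and ']' indicators terminate them).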
+
+func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool {
+ if parser.newlines > 0 {
+ return true
+ }
+
+ var start_mark yaml_mark_t
+ var text []byte
+
+ for peek := 0; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ if parser.buffer[parser.buffer_pos+peek] == '#' {
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ if len(text) == 0 {
+ start_mark = parser.mark
+ }
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+ }
+ break
+ }
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ token_mark: token_mark,
+ start_mark: start_mark,
+ line: text,
+ })
+ }
+ return true
+}
+
+func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool {
+ token := parser.tokens[len(parser.tokens)-1]
+
+ if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 {
+ token = parser.tokens[len(parser.tokens)-2]
+ }
+
+ var token_mark = token.start_mark
+ var start_mark yaml_mark_t
+ var next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+
+ var recent_empty = false
+ var first_empty = parser.newlines <= 1
+
+ var line = parser.mark.line
+ var column = parser.mark.column
+
+ var text []byte
+
+ // The foot line is the place where a comment must start to
+ // still be considered a foot of the prior content.
+ // If there's some content in the currently parsed line, then
+ // the foot is the line below it.
+ var foot_line = -1
+ if scan_mark.line > 0 {
+ foot_line = parser.mark.line-parser.newlines+1
+ if parser.newlines == 0 && parser.mark.column > 1 {
+ foot_line++
+ }
+ }
+
+ var peek = 0
+ for ; peek < 512; peek++ {
+ if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) {
+ break
+ }
+ column++
+ if is_blank(parser.buffer, parser.buffer_pos+peek) {
+ continue
+ }
+ c := parser.buffer[parser.buffer_pos+peek]
+ var close_flow = parser.flow_level > 0 && (c == ']' || c == '}')
+ if close_flow || is_breakz(parser.buffer, parser.buffer_pos+peek) {
+ // Got line break or terminator.
+ if close_flow || !recent_empty {
+ if close_flow || first_empty && (start_mark.line == foot_line && token.typ != yaml_VALUE_TOKEN || start_mark.column-1 < next_indent) {
+ // This is the first empty line and there were no empty lines before,
+ // so this initial part of the comment is a foot of the prior token
+ // instead of being a head for the following one. Split it up.
+ // Alternatively, this might also be the last comment inside a flow
+ // scope, so it must be a footer.
+ if len(text) > 0 {
+ if start_mark.column-1 < next_indent {
+ // If dedented it's unrelated to the prior token.
+ token_mark = start_mark
+ }
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+ } else {
+ if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 {
+ text = append(text, '\n')
+ }
+ }
+ }
+ if !is_break(parser.buffer, parser.buffer_pos+peek) {
+ break
+ }
+ first_empty = false
+ recent_empty = true
+ column = 0
+ line++
+ continue
+ }
+
+ if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.column) {
+ // The comment at the different indentation is a foot of the
+ // preceding data rather than a head of the upcoming one.
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: token_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek, line, column},
+ foot: text,
+ })
+ scan_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ token_mark = scan_mark
+ text = nil
+ }
+
+ if parser.buffer[parser.buffer_pos+peek] != '#' {
+ break
+ }
+
+ if len(text) == 0 {
+ start_mark = yaml_mark_t{parser.mark.index + peek, line, column}
+ } else {
+ text = append(text, '\n')
+ }
+
+ recent_empty = false
+
+ // Consume until after the consumed comment line.
+ seen := parser.mark.index+peek
+ for {
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+ return false
+ }
+ if is_breakz(parser.buffer, parser.buffer_pos) {
+ if parser.mark.index >= seen {
+ break
+ }
+ if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+ return false
+ }
+ skip_line(parser)
+ } else if parser.mark.index >= seen {
+ text = read(parser, text)
+ } else {
+ skip(parser)
+ }
+ }
+
+ peek = 0
+ column = 0
+ line = parser.mark.line
+ next_indent = parser.indent
+ if next_indent < 0 {
+ next_indent = 0
+ }
+ }
+
+ if len(text) > 0 {
+ parser.comments = append(parser.comments, yaml_comment_t{
+ scan_mark: scan_mark,
+ token_mark: start_mark,
+ start_mark: start_mark,
+ end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column},
+ head: text,
+ })
+ }
+ return true
+}
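+
+// Illustrative note (not part of the original source): the bookkeeping above
+// classifies comments by their position relative to the surrounding tokens:
+//
+//     # head comment of "key" (precedes it, no blank line between)
+//     key: value # line comment of the "value" token
+//     # foot comment of "key" (follows it, before a blank line)
+//
+//     other: value
+//
+// The foot_line computation decides whether a comment on the line after
+// "key" still belongs to it or becomes the head of "other".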
diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go
new file mode 100644
index 0000000..9210ece
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/sorter.go
@@ -0,0 +1,134 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package yaml
+
+import (
+ "reflect"
+ "unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+ a := l[i]
+ b := l[j]
+ ak := a.Kind()
+ bk := b.Kind()
+ for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+ a = a.Elem()
+ ak = a.Kind()
+ }
+ for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+ b = b.Elem()
+ bk = b.Kind()
+ }
+ af, aok := keyFloat(a)
+ bf, bok := keyFloat(b)
+ if aok && bok {
+ if af != bf {
+ return af < bf
+ }
+ if ak != bk {
+ return ak < bk
+ }
+ return numLess(a, b)
+ }
+ if ak != reflect.String || bk != reflect.String {
+ return ak < bk
+ }
+ ar, br := []rune(a.String()), []rune(b.String())
+ digits := false
+ for i := 0; i < len(ar) && i < len(br); i++ {
+ if ar[i] == br[i] {
+ digits = unicode.IsDigit(ar[i])
+ continue
+ }
+ al := unicode.IsLetter(ar[i])
+ bl := unicode.IsLetter(br[i])
+ if al && bl {
+ return ar[i] < br[i]
+ }
+ if al || bl {
+ if digits {
+ return al
+ } else {
+ return bl
+ }
+ }
+ var ai, bi int
+ var an, bn int64
+ if ar[i] == '0' || br[i] == '0' {
+ for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
+ if ar[j] != '0' {
+ an = 1
+ bn = 1
+ break
+ }
+ }
+ }
+ for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+ an = an*10 + int64(ar[ai]-'0')
+ }
+ for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+ bn = bn*10 + int64(br[bi]-'0')
+ }
+ if an != bn {
+ return an < bn
+ }
+ if ai != bi {
+ return ai < bi
+ }
+ return ar[i] < br[i]
+ }
+ return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number or bool,
+// and reports whether it is one.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return float64(v.Int()), true
+ case reflect.Float32, reflect.Float64:
+ return v.Float(), true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return float64(v.Uint()), true
+ case reflect.Bool:
+ if v.Bool() {
+ return 1, true
+ }
+ return 0, true
+ }
+ return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+ switch a.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return a.Int() < b.Int()
+ case reflect.Float32, reflect.Float64:
+ return a.Float() < b.Float()
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return a.Uint() < b.Uint()
+ case reflect.Bool:
+ return !a.Bool() && b.Bool()
+ }
+ panic("not a number")
+}
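+
+// Illustrative sketch (not in the original source): Less implements a
+// natural ordering for map keys, so runs of digits compare numerically.
+// Assuming "sort" were imported:
+//
+//     keys := keyList{
+//         reflect.ValueOf("a10"), reflect.ValueOf("a2"), reflect.ValueOf("a1"),
+//     }
+//     sort.Sort(keys)
+//     // yields a1, a2, a10 rather than the lexicographic a1, a10, a2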
diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go
new file mode 100644
index 0000000..b8a116b
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/writerc.go
@@ -0,0 +1,48 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+ return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("write handler not set")
+ }
+
+ // Check if the buffer is empty.
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+}
diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go
new file mode 100644
index 0000000..8cec6da
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yaml.go
@@ -0,0 +1,698 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+// https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode/utf8"
+)
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document.
+type Unmarshaler interface {
+ UnmarshalYAML(value *Node) error
+}
+
+type obsoleteUnmarshaler interface {
+ UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+ MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// var t T
+// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+ return unmarshal(in, out, false)
+}
+
+// A Decoder reads and decodes YAML values from an input stream.
+type Decoder struct {
+ parser *parser
+ knownFields bool
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read
+// data from r beyond the YAML values requested.
+func NewDecoder(r io.Reader) *Decoder {
+ return &Decoder{
+ parser: newParserFromReader(r),
+ }
+}
+
+// KnownFields ensures that the keys in decoded mappings
+// exist as fields in the struct being decoded into.
+func (dec *Decoder) KnownFields(enable bool) {
+ dec.knownFields = enable
+}
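+
+// Illustrative sketch (not part of the original source): with KnownFields
+// enabled, an unknown key produces a *yaml.TypeError instead of being
+// silently dropped.
+//
+//     type T struct{ A int }
+//     dec := yaml.NewDecoder(strings.NewReader("a: 1\nb: 2"))
+//     dec.KnownFields(true)
+//     var t T
+//     err := dec.Decode(&t) // err reports that field b is not found in T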
+
+// Decode reads the next YAML-encoded value from its input
+// and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (dec *Decoder) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ d.knownFields = dec.knownFields
+ defer handleErr(&err)
+ node := dec.parser.parse()
+ if node == nil {
+ return io.EOF
+ }
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(node, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Decode decodes the node and stores its data into the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about the
+// conversion of YAML into a Go value.
+func (n *Node) Decode(v interface{}) (err error) {
+ d := newDecoder()
+ defer handleErr(&err)
+ out := reflect.ValueOf(v)
+ if out.Kind() == reflect.Ptr && !out.IsNil() {
+ out = out.Elem()
+ }
+ d.unmarshal(n, out)
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
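+
+// Illustrative sketch (not in the original source): decoding into a Node
+// first and into a concrete type afterwards allows inspecting the raw
+// document structure in between.
+//
+//     var n yaml.Node
+//     yaml.Unmarshal([]byte("a: 1"), &n)
+//     var m map[string]int
+//     n.Decode(&m) // m["a"] == 1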
+
+func unmarshal(in []byte, out interface{}, strict bool) (err error) {
+ defer handleErr(&err)
+ d := newDecoder()
+ p := newParser(in)
+ defer p.destroy()
+ node := p.parse()
+ if node != nil {
+ v := reflect.ValueOf(out)
+ if v.Kind() == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ }
+ d.unmarshal(node, v)
+ }
+ if len(d.terrors) > 0 {
+ return &TypeError{d.terrors}
+ }
+ return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+// `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+// omitempty Only include the field if it's not set to the zero
+// value for the type or to empty slices or maps.
+// Zero valued structs will be omitted if all their public
+// fields are zero, unless they implement an IsZero
+// method (see the IsZeroer interface type), in which
+// case the field will be excluded if IsZero returns true.
+//
+// flow Marshal using a flow style (useful for structs,
+// sequences and maps).
+//
+// inline Inline the field, which must be a struct or a map,
+// causing all of its fields or keys to be processed as if
+// they were part of the outer struct. For maps, keys must
+// not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+// type T struct {
+// F int `yaml:"a,omitempty"`
+// B int
+// }
+// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+// yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(in))
+ e.finish()
+ out = e.out
+ return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+ encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return &Encoder{
+ encoder: newEncoderWithWriter(w),
+ }
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e.encoder.marshalDoc("", reflect.ValueOf(v))
+ return nil
+}
+
+// Encode encodes value v and stores its representation in n.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values into YAML.
+func (n *Node) Encode(v interface{}) (err error) {
+ defer handleErr(&err)
+ e := newEncoder()
+ defer e.destroy()
+ e.marshalDoc("", reflect.ValueOf(v))
+ e.finish()
+ p := newParser(e.out)
+ p.textless = true
+ defer p.destroy()
+ doc := p.parse()
+ *n = *doc.Content[0]
+ return nil
+}
+
+// SetIndent changes the indentation used when encoding.
+func (e *Encoder) SetIndent(spaces int) {
+ if spaces < 0 {
+ panic("yaml: cannot indent to a negative number of spaces")
+ }
+ e.encoder.indent = spaces
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+ defer handleErr(&err)
+ e.encoder.finish()
+ return nil
+}
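+
+// Illustrative sketch (not part of the original source): Close must be
+// called to flush the emitter's buffered output to the writer.
+//
+//     var buf bytes.Buffer
+//     enc := yaml.NewEncoder(&buf)
+//     enc.Encode(map[string]int{"a": 1})
+//     enc.Encode(map[string]int{"b": 2}) // preceded by "---"
+//     enc.Close()
+//     // buf now holds "a: 1\n---\nb: 2\n"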
+
+func handleErr(err *error) {
+ if v := recover(); v != nil {
+ if e, ok := v.(yamlError); ok {
+ *err = e.err
+ } else {
+ panic(v)
+ }
+ }
+}
+
+type yamlError struct {
+ err error
+}
+
+func fail(err error) {
+ panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+ panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+ Errors []string
+}
+
+func (e *TypeError) Error() string {
+ return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
+}
+
+type Kind uint32
+
+const (
+ DocumentNode Kind = 1 << iota
+ SequenceNode
+ MappingNode
+ ScalarNode
+ AliasNode
+)
+
+type Style uint32
+
+const (
+ TaggedStyle Style = 1 << iota
+ DoubleQuotedStyle
+ SingleQuotedStyle
+ LiteralStyle
+ FoldedStyle
+ FlowStyle
+)
+
+// Node represents an element in the YAML document hierarchy. While documents
+// are typically encoded and decoded into higher level types, such as structs
+// and maps, Node is an intermediate representation that allows detailed
+// control over the content being decoded or encoded.
+//
+// It's worth noting that although Node offers access into details such as
+// line numbers, columns, and comments, the content when re-encoded will not
+// have its original textual representation preserved. An effort is made to
+// render the data pleasantly, and to preserve comments near the data they
+// describe, though.
+//
+// Values that make use of the Node type interact with the yaml package in the
+// same way any other type would do, by encoding and decoding yaml data
+// directly or indirectly into them.
+//
+// For example:
+//
+// var person struct {
+// Name string
+// Address yaml.Node
+// }
+// err := yaml.Unmarshal(data, &person)
+//
+// Or by itself:
+//
+// var person Node
+// err := yaml.Unmarshal(data, &person)
+//
+type Node struct {
+ // Kind defines whether the node is a document, a mapping, a sequence,
+ // a scalar value, or an alias to another node. The specific data type of
+ // scalar nodes may be obtained via the ShortTag and LongTag methods.
+ Kind Kind
+
+ // Style allows customizing the appearance of the node in the tree.
+ Style Style
+
+ // Tag holds the YAML tag defining the data type for the value.
+ // When decoding, this field will always be set to the resolved tag,
+ // even when it wasn't explicitly provided in the YAML content.
+ // When encoding, if this field is unset the value type will be
+ // implied from the node properties, and if it is set, it will only
+ // be serialized into the representation if TaggedStyle is used or
+ // the implicit tag diverges from the provided one.
+ Tag string
+
+ // Value holds the unescaped and unquoted representation of the value.
+ Value string
+
+ // Anchor holds the anchor name for this node, which allows aliases to point to it.
+ Anchor string
+
+ // Alias holds the node that this alias points to. Only valid when Kind is AliasNode.
+ Alias *Node
+
+ // Content holds contained nodes for documents, mappings, and sequences.
+ Content []*Node
+
+ // HeadComment holds any comments in the lines preceding the node and
+ // not separated by an empty line.
+ HeadComment string
+
+ // LineComment holds any comments at the end of the line the node is in.
+ LineComment string
+
+ // FootComment holds any comments following the node and before empty lines.
+ FootComment string
+
+ // Line and Column hold the node position in the decoded YAML text.
+ // These fields are not respected when encoding the node.
+ Line int
+ Column int
+}
+
+// IsZero returns whether the node has all of its fields unset.
+func (n *Node) IsZero() bool {
+ return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil &&
+ n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0
+}
+
+// LongTag returns the long form of the tag that indicates the data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) LongTag() string {
+ return longTag(n.ShortTag())
+}
+
+// ShortTag returns the short form of the YAML tag that indicates data type for
+// the node. If the Tag field isn't explicitly defined, one will be computed
+// based on the node properties.
+func (n *Node) ShortTag() string {
+ if n.indicatedString() {
+ return strTag
+ }
+ if n.Tag == "" || n.Tag == "!" {
+ switch n.Kind {
+ case MappingNode:
+ return mapTag
+ case SequenceNode:
+ return seqTag
+ case AliasNode:
+ if n.Alias != nil {
+ return n.Alias.ShortTag()
+ }
+ case ScalarNode:
+ tag, _ := resolve("", n.Value)
+ return tag
+ case 0:
+ // Special case to make the zero value convenient.
+ if n.IsZero() {
+ return nullTag
+ }
+ }
+ return ""
+ }
+ return shortTag(n.Tag)
+}
+
+func (n *Node) indicatedString() bool {
+ return n.Kind == ScalarNode &&
+ (shortTag(n.Tag) == strTag ||
+ (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0)
+}
+
+// SetString is a convenience function that sets the node to a string value
+// and defines its style in a pleasant way depending on its content.
+func (n *Node) SetString(s string) {
+ n.Kind = ScalarNode
+ if utf8.ValidString(s) {
+ n.Value = s
+ n.Tag = strTag
+ } else {
+ n.Value = encodeBase64(s)
+ n.Tag = binaryTag
+ }
+ if strings.Contains(n.Value, "\n") {
+ n.Style = LiteralStyle
+ }
+}
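+
+// Illustrative sketch (not in the original source): SetString derives the
+// style from the content, so multi-line strings render as literal blocks.
+//
+//     var n yaml.Node
+//     n.SetString("a\nb")
+//     // n.Kind == ScalarNode, n.Tag == "!!str", n.Style == LiteralStyle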
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+ FieldsMap map[string]fieldInfo
+ FieldsList []fieldInfo
+
+ // InlineMap is the number of the field in the struct that
+ // contains an ,inline map, or -1 if there's none.
+ InlineMap int
+
+ // InlineUnmarshalers holds indexes to inlined fields that
+ // contain unmarshaler values.
+ InlineUnmarshalers [][]int
+}
+
+type fieldInfo struct {
+ Key string
+ Num int
+ OmitEmpty bool
+ Flow bool
+ // Id holds the unique field identifier, so we can cheaply
+ // check for field duplicates without maintaining an extra map.
+ Id int
+
+ // Inline holds the field index if the field is part of an inlined struct.
+ Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+var unmarshalerType reflect.Type
+
+func init() {
+ var v Unmarshaler
+ unmarshalerType = reflect.ValueOf(&v).Elem().Type()
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+ fieldMapMutex.RLock()
+ sinfo, found := structMap[st]
+ fieldMapMutex.RUnlock()
+ if found {
+ return sinfo, nil
+ }
+
+ n := st.NumField()
+ fieldsMap := make(map[string]fieldInfo)
+ fieldsList := make([]fieldInfo, 0, n)
+ inlineMap := -1
+ inlineUnmarshalers := [][]int(nil)
+ for i := 0; i != n; i++ {
+ field := st.Field(i)
+ if field.PkgPath != "" && !field.Anonymous {
+ continue // Private field
+ }
+
+ info := fieldInfo{Num: i}
+
+ tag := field.Tag.Get("yaml")
+ if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+ tag = string(field.Tag)
+ }
+ if tag == "-" {
+ continue
+ }
+
+ inline := false
+ fields := strings.Split(tag, ",")
+ if len(fields) > 1 {
+ for _, flag := range fields[1:] {
+ switch flag {
+ case "omitempty":
+ info.OmitEmpty = true
+ case "flow":
+ info.Flow = true
+ case "inline":
+ inline = true
+ default:
+ return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st)
+ }
+ }
+ tag = fields[0]
+ }
+
+ if inline {
+ switch field.Type.Kind() {
+ case reflect.Map:
+ if inlineMap >= 0 {
+ return nil, errors.New("multiple ,inline maps in struct " + st.String())
+ }
+ if field.Type.Key() != reflect.TypeOf("") {
+ return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String())
+ }
+ inlineMap = info.Num
+ case reflect.Struct, reflect.Ptr:
+ ftype := field.Type
+ for ftype.Kind() == reflect.Ptr {
+ ftype = ftype.Elem()
+ }
+ if ftype.Kind() != reflect.Struct {
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ if reflect.PtrTo(ftype).Implements(unmarshalerType) {
+ inlineUnmarshalers = append(inlineUnmarshalers, []int{i})
+ } else {
+ sinfo, err := getStructInfo(ftype)
+ if err != nil {
+ return nil, err
+ }
+ for _, index := range sinfo.InlineUnmarshalers {
+ inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...))
+ }
+ for _, finfo := range sinfo.FieldsList {
+ if _, found := fieldsMap[finfo.Key]; found {
+ msg := "duplicated key '" + finfo.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+ if finfo.Inline == nil {
+ finfo.Inline = []int{i, finfo.Num}
+ } else {
+ finfo.Inline = append([]int{i}, finfo.Inline...)
+ }
+ finfo.Id = len(fieldsList)
+ fieldsMap[finfo.Key] = finfo
+ fieldsList = append(fieldsList, finfo)
+ }
+ }
+ default:
+ return nil, errors.New("option ,inline may only be used on a struct or map field")
+ }
+ continue
+ }
+
+ if tag != "" {
+ info.Key = tag
+ } else {
+ info.Key = strings.ToLower(field.Name)
+ }
+
+ if _, found = fieldsMap[info.Key]; found {
+ msg := "duplicated key '" + info.Key + "' in struct " + st.String()
+ return nil, errors.New(msg)
+ }
+
+ info.Id = len(fieldsList)
+ fieldsList = append(fieldsList, info)
+ fieldsMap[info.Key] = info
+ }
+
+ sinfo = &structInfo{
+ FieldsMap: fieldsMap,
+ FieldsList: fieldsList,
+ InlineMap: inlineMap,
+ InlineUnmarshalers: inlineUnmarshalers,
+ }
+
+ fieldMapMutex.Lock()
+ structMap[st] = sinfo
+ fieldMapMutex.Unlock()
+ return sinfo, nil
+}
+
+// IsZeroer is used to check whether an object is zero to
+// determine whether it should be omitted when marshaling
+// with the omitempty flag. One notable implementation
+// is time.Time.
+type IsZeroer interface {
+ IsZero() bool
+}
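+
+// Illustrative sketch (not part of the original source): a type can opt in
+// to omitempty semantics by implementing IsZeroer, much like time.Time.
+//
+//     type Window struct{ From, To int }
+//
+//     func (w Window) IsZero() bool { return w.From == 0 && w.To == 0 }
+//
+// A field of type Window tagged yaml:",omitempty" is then omitted whenever
+// IsZero reports true.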
+
+func isZero(v reflect.Value) bool {
+ kind := v.Kind()
+ if z, ok := v.Interface().(IsZeroer); ok {
+ if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
+ return true
+ }
+ return z.IsZero()
+ }
+ switch kind {
+ case reflect.String:
+ return len(v.String()) == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ case reflect.Slice:
+ return v.Len() == 0
+ case reflect.Map:
+ return v.Len() == 0
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Struct:
+ vt := v.Type()
+ for i := v.NumField() - 1; i >= 0; i-- {
+ if vt.Field(i).PkgPath != "" {
+ continue // Private field
+ }
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go
new file mode 100644
index 0000000..7c6d007
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlh.go
@@ -0,0 +1,807 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+import (
+ "fmt"
+ "io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+ major int8 // The major version number.
+ minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle.
+ prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+ // Let the parser choose the encoding.
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+
+ yaml_UTF8_ENCODING // The default UTF-8 encoding.
+ yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+ yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+ // Let the parser choose the break type.
+ yaml_ANY_BREAK yaml_break_t = iota
+
+ yaml_CR_BREAK // Use CR for line breaks (Mac style).
+ yaml_LN_BREAK // Use LN for line breaks (Unix style).
+ yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+ // No error is produced.
+ yaml_NO_ERROR yaml_error_type_t = iota
+
+ yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
+ yaml_READER_ERROR // Cannot read or decode the input stream.
+ yaml_SCANNER_ERROR // Cannot scan the input stream.
+ yaml_PARSER_ERROR // Cannot parse the input stream.
+ yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+ yaml_WRITER_ERROR // Cannot write to the output stream.
+ yaml_EMITTER_ERROR // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+ index int // The position index.
+ line int // The position line.
+ column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0
+
+ yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style.
+ yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+ yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
+ yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+ yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+ // Let the emitter choose the style.
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+ yaml_FLOW_MAPPING_STYLE // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+ // An empty token.
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ yaml_STREAM_START_TOKEN // A STREAM-START token.
+ yaml_STREAM_END_TOKEN // A STREAM-END token.
+
+ yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+ yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
+ yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
+
+ yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+ yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-MAPPING-START token.
+ yaml_BLOCK_END_TOKEN // A BLOCK-END token.
+
+ yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+ yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
+ yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
+ yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
+
+ yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+ yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
+ yaml_KEY_TOKEN // A KEY token.
+ yaml_VALUE_TOKEN // A VALUE token.
+
+ yaml_ALIAS_TOKEN // An ALIAS token.
+ yaml_ANCHOR_TOKEN // An ANCHOR token.
+ yaml_TAG_TOKEN // A TAG token.
+ yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+ switch tt {
+ case yaml_NO_TOKEN:
+ return "yaml_NO_TOKEN"
+ case yaml_STREAM_START_TOKEN:
+ return "yaml_STREAM_START_TOKEN"
+ case yaml_STREAM_END_TOKEN:
+ return "yaml_STREAM_END_TOKEN"
+ case yaml_VERSION_DIRECTIVE_TOKEN:
+ return "yaml_VERSION_DIRECTIVE_TOKEN"
+ case yaml_TAG_DIRECTIVE_TOKEN:
+ return "yaml_TAG_DIRECTIVE_TOKEN"
+ case yaml_DOCUMENT_START_TOKEN:
+ return "yaml_DOCUMENT_START_TOKEN"
+ case yaml_DOCUMENT_END_TOKEN:
+ return "yaml_DOCUMENT_END_TOKEN"
+ case yaml_BLOCK_SEQUENCE_START_TOKEN:
+ return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+ case yaml_BLOCK_MAPPING_START_TOKEN:
+ return "yaml_BLOCK_MAPPING_START_TOKEN"
+ case yaml_BLOCK_END_TOKEN:
+ return "yaml_BLOCK_END_TOKEN"
+ case yaml_FLOW_SEQUENCE_START_TOKEN:
+ return "yaml_FLOW_SEQUENCE_START_TOKEN"
+ case yaml_FLOW_SEQUENCE_END_TOKEN:
+ return "yaml_FLOW_SEQUENCE_END_TOKEN"
+ case yaml_FLOW_MAPPING_START_TOKEN:
+ return "yaml_FLOW_MAPPING_START_TOKEN"
+ case yaml_FLOW_MAPPING_END_TOKEN:
+ return "yaml_FLOW_MAPPING_END_TOKEN"
+ case yaml_BLOCK_ENTRY_TOKEN:
+ return "yaml_BLOCK_ENTRY_TOKEN"
+ case yaml_FLOW_ENTRY_TOKEN:
+ return "yaml_FLOW_ENTRY_TOKEN"
+ case yaml_KEY_TOKEN:
+ return "yaml_KEY_TOKEN"
+ case yaml_VALUE_TOKEN:
+ return "yaml_VALUE_TOKEN"
+ case yaml_ALIAS_TOKEN:
+ return "yaml_ALIAS_TOKEN"
+ case yaml_ANCHOR_TOKEN:
+ return "yaml_ANCHOR_TOKEN"
+ case yaml_TAG_TOKEN:
+ return "yaml_TAG_TOKEN"
+ case yaml_SCALAR_TOKEN:
+ return "yaml_SCALAR_TOKEN"
+ }
+ return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+ // The token type.
+ typ yaml_token_type_t
+
+ // The start/end of the token.
+ start_mark, end_mark yaml_mark_t
+
+ // The stream encoding (for yaml_STREAM_START_TOKEN).
+ encoding yaml_encoding_t
+
+ // The alias/anchor/scalar value or tag/tag directive handle
+ // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+ value []byte
+
+ // The tag suffix (for yaml_TAG_TOKEN).
+ suffix []byte
+
+ // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+ prefix []byte
+
+ // The scalar style (for yaml_SCALAR_TOKEN).
+ style yaml_scalar_style_t
+
+ // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+ major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+ // An empty event.
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ yaml_STREAM_START_EVENT // A STREAM-START event.
+ yaml_STREAM_END_EVENT // A STREAM-END event.
+ yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+ yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
+ yaml_ALIAS_EVENT // An ALIAS event.
+ yaml_SCALAR_EVENT // A SCALAR event.
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+ yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
+ yaml_MAPPING_START_EVENT // A MAPPING-START event.
+ yaml_MAPPING_END_EVENT // A MAPPING-END event.
+ yaml_TAIL_COMMENT_EVENT // A TAIL-COMMENT event (not in original libyaml).
+)
+
+var eventStrings = []string{
+ yaml_NO_EVENT: "none",
+ yaml_STREAM_START_EVENT: "stream start",
+ yaml_STREAM_END_EVENT: "stream end",
+ yaml_DOCUMENT_START_EVENT: "document start",
+ yaml_DOCUMENT_END_EVENT: "document end",
+ yaml_ALIAS_EVENT: "alias",
+ yaml_SCALAR_EVENT: "scalar",
+ yaml_SEQUENCE_START_EVENT: "sequence start",
+ yaml_SEQUENCE_END_EVENT: "sequence end",
+ yaml_MAPPING_START_EVENT: "mapping start",
+ yaml_MAPPING_END_EVENT: "mapping end",
+ yaml_TAIL_COMMENT_EVENT: "tail comment",
+}
+
+func (e yaml_event_type_t) String() string {
+ if e < 0 || int(e) >= len(eventStrings) {
+ return fmt.Sprintf("unknown event %d", e)
+ }
+ return eventStrings[e]
+}
+
+// The event structure.
+type yaml_event_t struct {
+
+ // The event type.
+ typ yaml_event_type_t
+
+ // The start and end of the event.
+ start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_EVENT).
+ encoding yaml_encoding_t
+
+ // The version directive (for yaml_DOCUMENT_START_EVENT).
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+ tag_directives []yaml_tag_directive_t
+
+	// The comments attached to the event, if any.
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+ anchor []byte
+
+ // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ tag []byte
+
+ // The scalar value (for yaml_SCALAR_EVENT).
+ value []byte
+
+ // Is the document start/end indicator implicit, or the tag optional?
+ // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+ implicit bool
+
+ // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+ quoted_implicit bool
+
+ // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+ style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+ yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
+ yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mappings.
+
+ // Not in original libyaml.
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+ yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
+
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+ // An empty node.
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ yaml_SCALAR_NODE // A scalar node.
+ yaml_SEQUENCE_NODE // A sequence node.
+ yaml_MAPPING_NODE // A mapping node.
+)
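+
+// For illustration only (not part of the vendored source): a hypothetical
+// helper showing how the default tags above pair with the node kinds.
+//
+//	func sketch_default_tag(typ yaml_node_type_t) string {
+//		switch typ {
+//		case yaml_SCALAR_NODE:
+//			return yaml_DEFAULT_SCALAR_TAG // !!str
+//		case yaml_SEQUENCE_NODE:
+//			return yaml_DEFAULT_SEQUENCE_TAG // !!seq
+//		case yaml_MAPPING_NODE:
+//			return yaml_DEFAULT_MAPPING_TAG // !!map
+//		}
+//		return ""
+//	}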
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+ key int // The key of the element.
+ value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+ typ yaml_node_type_t // The node type.
+ tag []byte // The node tag.
+
+ // The node data.
+
+ // The scalar parameters (for yaml_SCALAR_NODE).
+ scalar struct {
+ value []byte // The scalar value.
+ length int // The length of the scalar value.
+ style yaml_scalar_style_t // The scalar style.
+ }
+
+ // The sequence parameters (for YAML_SEQUENCE_NODE).
+ sequence struct {
+ items_data []yaml_node_item_t // The stack of sequence items.
+ style yaml_sequence_style_t // The sequence style.
+ }
+
+ // The mapping parameters (for yaml_MAPPING_NODE).
+ mapping struct {
+ pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
+ pairs_start *yaml_node_pair_t // The beginning of the stack.
+ pairs_end *yaml_node_pair_t // The end of the stack.
+ pairs_top *yaml_node_pair_t // The top of the stack.
+ style yaml_mapping_style_t // The mapping style.
+ }
+
+ start_mark yaml_mark_t // The beginning of the node.
+ end_mark yaml_mark_t // The end of the node.
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+ // The document nodes.
+ nodes []yaml_node_t
+
+ // The version directive.
+ version_directive *yaml_version_directive_t
+
+ // The list of tag directives.
+ tag_directives_data []yaml_tag_directive_t
+ tag_directives_start int // The beginning of the tag directives list.
+ tag_directives_end int // The end of the tag directives list.
+
+ start_implicit int // Is the document start indicator implicit?
+ end_implicit int // Is the document end indicator implicit?
+
+ // The start/end of the document.
+ start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs more bytes from the input
+// source. It should write at most len(buffer) bytes into buffer and return
+// the number of bytes written, following the io.Reader contract: at the end
+// of the input it returns io.EOF (possibly together with the final bytes).
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
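+
+// For illustration only (not part of the vendored source): a minimal sketch
+// of a conforming read handler that forwards to the parser's input_reader
+// field declared below, relying on the io.Reader contract the parser
+// expects (n bytes written, io.EOF at the end of the input).
+//
+//	func sketch_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//		// Read writes at most len(buffer) bytes; io.EOF signals end of input.
+//		return parser.input_reader.Read(buffer)
+//	}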
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+ possible bool // Is a simple key possible?
+ required bool // Is a simple key required?
+ token_number int // The number of the token.
+ mark yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
+ yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
+ yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+ yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
+ yaml_PARSE_END_STATE // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+ switch ps {
+ case yaml_PARSE_STREAM_START_STATE:
+ return "yaml_PARSE_STREAM_START_STATE"
+ case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+ return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_START_STATE:
+ return "yaml_PARSE_DOCUMENT_START_STATE"
+ case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+ return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+ case yaml_PARSE_DOCUMENT_END_STATE:
+ return "yaml_PARSE_DOCUMENT_END_STATE"
+ case yaml_PARSE_BLOCK_NODE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_STATE"
+ case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+ return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+ case yaml_PARSE_FLOW_NODE_STATE:
+ return "yaml_PARSE_FLOW_NODE_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+ case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+ return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+ case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+ case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+ case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+ return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+ case yaml_PARSE_END_STATE:
+ return "yaml_PARSE_END_STATE"
+ }
+ return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+ anchor []byte // The anchor.
+ index int // The node id.
+ mark yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+
+ problem string // Error description.
+
+	// The byte offset, character value, and mark at which the problem occurred.
+ problem_offset int
+ problem_value int
+ problem_mark yaml_mark_t
+
+ // The error context.
+ context string
+ context_mark yaml_mark_t
+
+ // Reader stuff
+
+ read_handler yaml_read_handler_t // Read handler.
+
+ input_reader io.Reader // File input data.
+ input []byte // String input data.
+ input_pos int
+
+	eof bool // EOF flag.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ unread int // The number of unread characters in the buffer.
+
+	newlines int // The number of line breaks since the last non-break/non-blank character.
+
+ raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The input encoding.
+
+ offset int // The offset of the current position (in bytes).
+ mark yaml_mark_t // The mark of the current position.
+
+ // Comments
+
+ head_comment []byte // The current head comments
+ line_comment []byte // The current line comments
+ foot_comment []byte // The current foot comments
+ tail_comment []byte // Foot comment that happens at the end of a block.
+	stem_comment []byte // Comment in the item preceding a nested structure (a list inside a list item, etc.).
+
+	comments      []yaml_comment_t // The folded comments for all parsed tokens.
+	comments_head int              // The head of the comments queue.
+
+ // Scanner stuff
+
+ stream_start_produced bool // Have we started to scan the input stream?
+ stream_end_produced bool // Have we reached the end of the input stream?
+
+ flow_level int // The number of unclosed '[' and '{' indicators.
+
+ tokens []yaml_token_t // The tokens queue.
+ tokens_head int // The head of the tokens queue.
+ tokens_parsed int // The number of tokens fetched from the queue.
+ token_available bool // Does the tokens queue contain a token ready for dequeueing.
+
+ indent int // The current indentation level.
+ indents []int // The indentation levels stack.
+
+ simple_key_allowed bool // May a simple key occur at the current position?
+ simple_keys []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // Possible simple_key indexes, indexed by token_number.
+
+ // Parser stuff
+
+ state yaml_parser_state_t // The current parser state.
+ states []yaml_parser_state_t // The parser states stack.
+ marks []yaml_mark_t // The stack of marks.
+ tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+ // Dumper stuff
+
+ aliases []yaml_alias_data_t // The alias data.
+
+ document *yaml_document_t // The currently parsed document.
+}
+
+type yaml_comment_t struct {
+
+ scan_mark yaml_mark_t // Position where scanning for comments started
+ token_mark yaml_mark_t // Position after which tokens will be associated with this comment
+ start_mark yaml_mark_t // Position of '#' comment mark
+ end_mark yaml_mark_t // Position where comment terminated
+
+ head []byte
+ line []byte
+ foot []byte
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. It should write all of buffer to the output and
+// return nil on success, or a non-nil error on failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
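+
+// For illustration only (not part of the vendored source): a minimal sketch
+// of a conforming write handler that appends to the emitter's in-memory
+// output_buffer field declared below.
+//
+//	func sketch_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//		// An in-memory append cannot fail, so always report success.
+//		*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+//		return nil
+//	}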
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+ // Expect STREAM-START.
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
+ yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
+ yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE       // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE   // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE   // Expect the next key of a flow mapping, with the comma already written out.
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
+ yaml_EMIT_END_STATE // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+ // Error handling
+
+ error yaml_error_type_t // Error type.
+ problem string // Error description.
+
+ // Writer stuff
+
+ write_handler yaml_write_handler_t // Write handler.
+
+ output_buffer *[]byte // String output data.
+ output_writer io.Writer // File output data.
+
+ buffer []byte // The working buffer.
+ buffer_pos int // The current position of the buffer.
+
+ raw_buffer []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+ encoding yaml_encoding_t // The stream encoding.
+
+ // Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+ best_indent int // The number of indentation spaces.
+ best_width int // The preferred width of the output lines.
+ unicode bool // Allow unescaped non-ASCII characters?
+ line_break yaml_break_t // The preferred line break.
+
+ state yaml_emitter_state_t // The current emitter state.
+ states []yaml_emitter_state_t // The stack of states.
+
+ events []yaml_event_t // The event queue.
+ events_head int // The head of the event queue.
+
+ indents []int // The stack of indentation levels.
+
+ tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+ indent int // The current indentation level.
+
+ flow_level int // The current flow level.
+
+ root_context bool // Is it the document root context?
+ sequence_context bool // Is it a sequence context?
+ mapping_context bool // Is it a mapping context?
+ simple_key_context bool // Is it a simple mapping key context?
+
+ line int // The current line.
+ column int // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	space_above bool // Is there an empty line above?
+ foot_indent int // The indent used to write the foot comment above, or -1 if none.
+
+ // Anchor analysis.
+ anchor_data struct {
+ anchor []byte // The anchor value.
+ alias bool // Is it an alias?
+ }
+
+ // Tag analysis.
+ tag_data struct {
+ handle []byte // The tag handle.
+ suffix []byte // The tag suffix.
+ }
+
+ // Scalar analysis.
+ scalar_data struct {
+ value []byte // The scalar value.
+ multiline bool // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+ block_plain_allowed bool // Can the scalar be expressed in the block plain style?
+ single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
+ block_allowed bool // Can the scalar be expressed in the literal or folded styles?
+ style yaml_scalar_style_t // The output style.
+ }
+
+ // Comments
+ head_comment []byte
+ line_comment []byte
+ foot_comment []byte
+ tail_comment []byte
+
+ key_line_comment []byte
+
+ // Dumper stuff
+
+	opened bool // Was the stream already opened?
+	closed bool // Was the stream already closed?
+
+ // The information associated with the document nodes.
+ anchors *struct {
+ references int // The number of references.
+ anchor int // The anchor id.
+		serialized bool // Has the node been emitted?
+ }
+
+ last_anchor_id int // The last assigned anchor id.
+
+ document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
new file mode 100644
index 0000000..e88f9c5
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go
@@ -0,0 +1,198 @@
+//
+// Copyright (c) 2011-2019 Canonical Ltd
+// Copyright (c) 2006-2010 Kirill Simonov
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+// of the Software, and to permit persons to whom the Software is furnished to do
+// so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package yaml
+
+const (
+ // The size of the input raw buffer.
+ input_raw_buffer_size = 512
+
+ // The size of the input buffer.
+ // It should be possible to decode the whole raw buffer.
+ input_buffer_size = input_raw_buffer_size * 3
+
+ // The size of the output buffer.
+ output_buffer_size = 128
+
+ // The size of the output raw buffer.
+ // It should be possible to encode the whole output buffer.
+ output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+ // The size of other stacks and queues.
+ initial_stack_size = 16
+ initial_queue_size = 16
+ initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+ return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+ return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+ bi := b[i]
+ if bi >= 'A' && bi <= 'F' {
+ return int(bi) - 'A' + 10
+ }
+ if bi >= 'a' && bi <= 'f' {
+ return int(bi) - 'a' + 10
+ }
+ return int(bi) - '0'
+}
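+
+// For illustration only (not part of the vendored source): the two hex
+// helpers above combine naturally when decoding a fixed-width escape such
+// as the two digits after "\x". A hypothetical sketch:
+//
+//	func sketch_parse_hex(b []byte, i, n int) (int, bool) {
+//		v := 0
+//		for k := 0; k < n; k++ {
+//			if !is_hex(b, i+k) {
+//				return 0, false
+//			}
+//			v = v<<4 | as_hex(b, i+k)
+//		}
+//		return v, true
+//	}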
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+ return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+ return ((b[i] == 0x0A) || // . == #x0A
+ (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+ (b[i] > 0xC2 && b[i] < 0xED) ||
+ (b[i] == 0xED && b[i+1] < 0xA0) ||
+ (b[i] == 0xEE) ||
+ (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+ !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+ !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+ return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM (the index argument is
+// ignored; only the start of the buffer is examined).
+func is_bom(b []byte, i int) bool {
+ return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+ return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+ return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+ //return is_space(b, i) || is_tab(b, i)
+ return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+ return (b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+ return b[i] == '\r' && b[i+1] == '\n'
+}
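+
+// For illustration only (not part of the vendored source): a hypothetical
+// helper advancing past a single line break, using is_crlf for the only
+// two-character ASCII break and the width function defined at the end of
+// this file for the multi-byte breaks (NEL, LS, PS).
+//
+//	func sketch_skip_break(b []byte, i int) int {
+//		if is_crlf(b, i) {
+//			return i + 2 // CR LF counts as a single break
+//		}
+//		return i + width(b[i]) // LF, CR, NEL, LS, or PS
+//	}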
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+ //return is_break(b, i) || is_z(b, i)
+ return (
+ // is_break:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ // is_z:
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+ //return is_space(b, i) || is_breakz(b, i)
+ return (
+ // is_space:
+ b[i] == ' ' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+ //return is_blank(b, i) || is_breakz(b, i)
+ return (
+ // is_blank:
+ b[i] == ' ' || b[i] == '\t' ||
+ // is_breakz:
+ b[i] == '\r' || // CR (#xD)
+ b[i] == '\n' || // LF (#xA)
+ b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+ b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+ b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+ // Don't replace these by a switch without first
+ // confirming that it is being inlined.
+ if b&0x80 == 0x00 {
+ return 1
+ }
+ if b&0xE0 == 0xC0 {
+ return 2
+ }
+ if b&0xF0 == 0xE0 {
+ return 3
+ }
+ if b&0xF8 == 0xF0 {
+ return 4
+ }
+	return 0
+}
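+
+// For illustration only (not part of the vendored source): width makes it
+// possible to walk a UTF-8 buffer character by character without decoding
+// runes. A hypothetical sketch counting characters:
+//
+//	func sketch_count_chars(b []byte) int {
+//		n := 0
+//		for i := 0; i < len(b); {
+//			w := width(b[i])
+//			if w == 0 {
+//				break // not a valid UTF-8 leading byte
+//			}
+//			i += w
+//			n++
+//		}
+//		return n
+//	}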
diff --git a/vendor/modules.txt b/vendor/modules.txt
new file mode 100644
index 0000000..aca6d31
--- /dev/null
+++ b/vendor/modules.txt
@@ -0,0 +1,55 @@
+# github.com/davecgh/go-spew v1.1.1
+github.com/davecgh/go-spew/spew
+# github.com/opencord/voltha-lib-go/v7 v7.1.5
+## explicit
+github.com/opencord/voltha-lib-go/v7/pkg/log
+github.com/opencord/voltha-lib-go/v7/pkg/probe
+github.com/opencord/voltha-lib-go/v7/pkg/version
+# github.com/opentracing/opentracing-go v1.2.0
+github.com/opentracing/opentracing-go
+github.com/opentracing/opentracing-go/ext
+github.com/opentracing/opentracing-go/log
+# github.com/pkg/errors v0.9.1
+github.com/pkg/errors
+# github.com/pmezard/go-difflib v1.0.0
+github.com/pmezard/go-difflib/difflib
+# github.com/stretchr/testify v1.7.0
+## explicit
+github.com/stretchr/testify/assert
+# github.com/uber/jaeger-client-go v2.29.1+incompatible
+github.com/uber/jaeger-client-go
+github.com/uber/jaeger-client-go/config
+github.com/uber/jaeger-client-go/internal/baggage
+github.com/uber/jaeger-client-go/internal/baggage/remote
+github.com/uber/jaeger-client-go/internal/reporterstats
+github.com/uber/jaeger-client-go/internal/spanlog
+github.com/uber/jaeger-client-go/internal/throttler
+github.com/uber/jaeger-client-go/internal/throttler/remote
+github.com/uber/jaeger-client-go/log
+github.com/uber/jaeger-client-go/rpcmetrics
+github.com/uber/jaeger-client-go/thrift
+github.com/uber/jaeger-client-go/thrift-gen/agent
+github.com/uber/jaeger-client-go/thrift-gen/baggage
+github.com/uber/jaeger-client-go/thrift-gen/jaeger
+github.com/uber/jaeger-client-go/thrift-gen/sampling
+github.com/uber/jaeger-client-go/thrift-gen/zipkincore
+github.com/uber/jaeger-client-go/transport
+github.com/uber/jaeger-client-go/utils
+# github.com/uber/jaeger-lib v2.4.1+incompatible
+github.com/uber/jaeger-lib/metrics
+# go.uber.org/atomic v1.7.0
+go.uber.org/atomic
+# go.uber.org/multierr v1.6.0
+go.uber.org/multierr
+# go.uber.org/zap v1.18.1
+go.uber.org/zap
+go.uber.org/zap/buffer
+go.uber.org/zap/internal/bufferpool
+go.uber.org/zap/internal/color
+go.uber.org/zap/internal/exit
+go.uber.org/zap/zapcore
+# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
+gopkg.in/yaml.v3
+# github.com/coreos/bbolt v1.3.4 => go.etcd.io/bbolt v1.3.4
+# go.etcd.io/bbolt v1.3.4 => github.com/coreos/bbolt v1.3.4
+# google.golang.org/grpc => google.golang.org/grpc v1.25.1